From 77d167ddbe93340c846546241ef8ad2e2b5952dd Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 10 May 2023 17:22:20 -0700 Subject: [PATCH 01/21] Add leastConnections strategy and autoscaling functionality --- .../ConnectionPluginManagerBenchmarks.java | 5 +- .../jdbc/benchmarks/PluginBenchmarks.java | 4 +- wrapper/build.gradle.kts | 24 ++ .../amazon/jdbc/ConnectionProvider.java | 19 +- .../jdbc/ConnectionProviderManager.java | 18 ++ .../jdbc/DataSourceConnectionProvider.java | 9 +- .../amazon/jdbc/DriverConnectionProvider.java | 9 +- .../jdbc/HikariPooledConnectionProvider.java | 171 +++++++++-- .../jdbc/plugin/DefaultConnectionPlugin.java | 25 +- .../ReadWriteSplittingPlugin.java | 36 +++ .../src/main/resources/messages.properties | 11 +- .../refactored/TestEnvironmentFeatures.java | 1 + .../refactored/TestEnvironmentInfo.java | 27 ++ .../refactored/TestInstanceInfo.java | 26 +- .../container/ConnectionStringHelper.java | 22 +- .../container/TestDriverProvider.java | 4 +- .../refactored/container/TestEnvironment.java | 2 +- .../tests/AdvancedPerformanceTest.java | 2 +- .../container/tests/AuroraFailoverTest.java | 29 +- .../container/tests/AutoscalingTests.java | 279 ++++++++++++++++++ .../tests/AwsIamIntegrationTest.java | 11 +- .../tests/BasicConnectivityTests.java | 39 +-- .../container/tests/DataCachePluginTests.java | 5 +- .../container/tests/DataSourceTests.java | 7 +- .../DriverConfigurationProfileTests.java | 5 +- .../container/tests/HikariTests.java | 10 +- .../container/tests/LogQueryPluginTests.java | 5 +- .../container/tests/PerformanceTest.java | 2 +- .../tests/ReadWriteSplittingTests.java | 159 ++++++++-- .../container/tests/SpringTests.java | 5 +- .../refactored/host/TestEnvironment.java | 25 +- .../host/TestEnvironmentProvider.java | 25 +- .../integration/util/AuroraTestUtility.java | 108 ++++++- .../integration/util/ContainerHelper.java | 2 +- .../HikariPooledConnectionProviderTest.java | 6 +- 35 files changed, 972 insertions(+), 165 deletions(-) create mode 100644 wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java index d7ff6986e..fb0ea592b 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java @@ -17,6 +17,7 @@ package software.amazon.jdbc.benchmarks; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; @@ -108,8 +109,8 @@ public void setUpIteration() throws Exception { when(mockConnectionProvider.connect(anyString(), any(Properties.class))).thenReturn( mockConnection); - when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), any(Properties.class))) - .thenReturn(mockConnection); + when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), + any(Properties.class), anyBoolean())).thenReturn(mockConnection); when(mockConnection.createStatement()).thenReturn(mockStatement); when(mockStatement.executeQuery(anyString())).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(true, true, false); diff --git 
a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java index 5569d88d7..e1615e83c 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java @@ -107,8 +107,8 @@ public void setUpIteration() throws Exception { .thenReturn(mockStatement); when(mockConnectionProvider.connect(anyString(), any(Properties.class))).thenReturn( mockConnection); - when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), any(Properties.class))) - .thenReturn(mockConnection); + when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), + any(Properties.class), anyBoolean())).thenReturn(mockConnection); when(mockConnection.createStatement()).thenReturn(mockStatement); when(mockStatement.executeQuery(anyString())).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(true, true, false); diff --git a/wrapper/build.gradle.kts b/wrapper/build.gradle.kts index cc234189b..7ec9d157c 100644 --- a/wrapper/build.gradle.kts +++ b/wrapper/build.gradle.kts @@ -404,3 +404,27 @@ tasks.register("test-aurora-mysql-performance") { systemProperty("test-no-mariadb-engine", "true") } } + +// Autoscaling + +tasks.register("test-autoscaling-only") { + group = "verification" + filter.includeTestsMatching("integration.refactored.host.TestRunner.runTests") + doFirst { + systemProperty("test-autoscaling-only", "true") + systemProperty("test-no-docker", "true") + systemProperty("test-no-performance", "true") + systemProperty("test-no-graalvm", "true") + } +} + +tasks.register("debug-autoscaling-only") { + group = "verification" + filter.includeTestsMatching("integration.refactored.host.TestRunner.debugTests") + doFirst { + systemProperty("test-autoscaling-only", "true") + systemProperty("test-no-docker", "true") + systemProperty("test-no-performance", "true") + systemProperty("test-no-graalvm", "true") + } +} diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 2807f5d30..23726b079 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -18,7 +18,9 @@ import java.sql.Connection; import java.sql.SQLException; +import java.util.EnumSet; import java.util.List; +import java.util.Map; import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.dialect.Dialect; @@ -63,8 +65,9 @@ boolean acceptsUrl( * @throws SQLException if an error occurred while returning the hosts * @throws UnsupportedOperationException if the strategy is unsupported by the provider */ - HostSpec getHostSpecByStrategy(@NonNull List hosts, @NonNull HostRole role, - @NonNull String strategy) throws SQLException, UnsupportedOperationException; + HostSpec getHostSpecByStrategy( + @NonNull List hosts, @NonNull HostRole role, @NonNull String strategy) + throws SQLException; /** * Called once per connection that needs to be created. 
@@ -80,7 +83,8 @@ Connection connect(
       @NonNull String protocol,
       @NonNull Dialect dialect,
       @NonNull HostSpec hostSpec,
-      @NonNull Properties props)
+      @NonNull Properties props,
+      boolean isInitialConnection)
       throws SQLException;
 
   /**
    *
@@ -93,4 +97,13 @@ Connection connect(
    */
   Connection connect(@NonNull String url, @NonNull Properties props) throws SQLException;
   // TODO: this method is only called by tests/benchmarks and can likely be deprecated
+
+  /**
+   * Notify this {@link ConnectionProvider} of any changes detected with any of the database
+   * instances.
+   *
+   * @param changes a map from a given database instance's URL to the detected changes for that
+   *     instance
+   */
+  void notifyNodeListChanged(Map<String, EnumSet<NodeChangeOptions>> changes);
 }
diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java
index 74cadf021..e9cd39083 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java
@@ -17,7 +17,9 @@
 package software.amazon.jdbc;
 
 import java.sql.SQLException;
+import java.util.EnumSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import software.amazon.jdbc.cleanup.CanReleaseResources;
@@ -164,6 +166,22 @@ public HostSpec getHostSpecByStrategy(List<HostSpec> hosts, HostRole role, Strin
     return host;
   }
 
+  public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>> changes) {
+    if (connProvider != null) {
+      connProviderLock.writeLock().lock();
+      try {
+        if (connProvider != null) {
+          connProvider.notifyNodeListChanged(changes);
+        }
+      } catch (UnsupportedOperationException e) {
+        // The custom provider does not support node list change notifications; ignore the
+        // exception and fall through to notify the default provider.
+      } finally {
+        connProviderLock.writeLock().unlock();
+      }
+    }
+    defaultProvider.notifyNodeListChanged(changes);
+  }
+
   /**
    * Clears the non-default {@link ConnectionProvider} if it has been set.
The default * ConnectionProvider will be used if the non-default ConnectionProvider has not been set or has diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 4a00655cc..0f7435809 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -22,6 +22,7 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -122,7 +123,8 @@ public Connection connect( final @NonNull String protocol, final @NonNull Dialect dialect, final @NonNull HostSpec hostSpec, - final @NonNull Properties props) + final @NonNull Properties props, + final boolean isInitialConnection) throws SQLException { final Properties copy = PropertyUtils.copyProperties(props); @@ -222,4 +224,9 @@ private DataSource createDataSource() throws SQLException { throw new SQLException(instEx.getMessage(), SqlState.UNKNOWN_STATE.getState(), instEx); } } + + @Override + public void notifyNodeListChanged(Map> changes) { + // Do nothing + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java index 4c97729d5..29291345a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java @@ -19,6 +19,7 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -101,7 +102,8 @@ public Connection connect( final @NonNull String protocol, final @NonNull Dialect dialect, final @NonNull HostSpec hostSpec, - final @NonNull Properties props) + final @NonNull Properties props, + final boolean isInitialConnection) throws SQLException { final String databaseName = PropertyDefinition.DATABASE.getString(props) != null @@ -146,4 +148,9 @@ public Connection connect(@NonNull final String url, @NonNull final Properties p LOGGER.finest(() -> "Connecting to " + url); return this.driver.connect(url, props); } + + @Override + public void notifyNodeListChanged(Map> changes) { + // Do nothing + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 7ada77159..3940f511e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -21,6 +21,9 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; @@ -28,10 +31,12 @@ import java.util.StringJoiner; import java.util.concurrent.ConcurrentHashMap; import java.util.logging.Logger; +import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.dialect.Dialect; import software.amazon.jdbc.util.HikariCPSQLException; +import software.amazon.jdbc.util.Messages; import 
software.amazon.jdbc.util.RdsUrlType;
 import software.amazon.jdbc.util.RdsUtils;
 import software.amazon.jdbc.util.StringUtils;
@@ -43,7 +48,11 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider,
       Logger.getLogger(HikariPooledConnectionProvider.class.getName());
 
   private static final RdsUtils rdsUtils = new RdsUtils();
-  private static final Map<String, HikariDataSource> databasePools = new ConcurrentHashMap<>();
+
+  // 2-layer map that helps us keep track of how many pools are associated with each database
+  // instance. The first layer maps from an instance URL to the 2nd map. The 2nd layer maps from
+  // the key returned from the poolMapping function to the database pool.
+  private static final Map<String, Map<String, HikariDataSource>> databasePools = new ConcurrentHashMap<>();
   private final HikariPoolConfigurator poolConfigurator;
   private final HikariPoolMapping poolMapping;
   protected int retries = 10;
@@ -101,15 +110,48 @@ public boolean acceptsUrl(
 
   @Override
   public boolean acceptsStrategy(@NonNull HostRole role, @NonNull String strategy) {
-    return false;
+    return "leastConnections".equals(strategy);
   }
 
   @Override
   public HostSpec getHostSpecByStrategy(
-      @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy) {
-    // This class does not accept any strategy, so the ConnectionProviderManager should prevent us
-    // from getting here.
-    return null;
+      @NonNull List<HostSpec> hosts, @NonNull HostRole role, @NonNull String strategy)
+      throws SQLException {
+    if (!"leastConnections".equals(strategy)) {
+      throw new UnsupportedOperationException(
+          Messages.get(
+              "ConnectionProvider.unsupportedHostSpecSelectorStrategy",
+              new Object[] {strategy, HikariPooledConnectionProvider.class}));
+    }
+
+    // Remove hosts with the wrong role
+    List<HostSpec> eligibleHosts = hosts.stream()
+        .filter(hostSpec -> role.equals(hostSpec.getRole()))
+        .collect(Collectors.toList());
+    if (eligibleHosts.size() == 0) {
+      throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role}));
+    }
+
+    if (HostRole.WRITER.equals(role)) {
+      // Assuming there is only one writer, there are no other valid hosts to return, so we do
+      // not have to sort by least connections.
+      return eligibleHosts.get(0);
+    }
+
+    eligibleHosts.sort((hostSpec1, hostSpec2) ->
+        getNumConnections(hostSpec1) - getNumConnections(hostSpec2));
+    return eligibleHosts.get(0);
+  }
+
+  private int getNumConnections(HostSpec hostSpec) {
+    Map<String, HikariDataSource> hostPools = databasePools.get(hostSpec.getUrl());
+    int numConnections = 0;
+    if (hostPools != null) {
+      for (HikariDataSource ds : hostPools.values()) {
+        numConnections += ds.getHikariPoolMXBean().getActiveConnections();
+      }
+    }
+    return numConnections;
   }
 
   @Override
@@ -117,26 +159,73 @@ public Connection connect(
       @NonNull String protocol,
       @NonNull Dialect dialect,
       @NonNull HostSpec hostSpec,
-      @NonNull Properties props)
+      @NonNull Properties props,
+      boolean isInitialConnection)
       throws SQLException {
-    final HikariDataSource ds = databasePools.computeIfAbsent(
-        getPoolKey(hostSpec, props),
-        url -> createHikariDataSource(protocol, hostSpec, props)
+    // TODO: do we need to lock databasePools when we are interacting with it? Even though it is
+    // a ConcurrentHashMap, it contains another ConcurrentHashMap. Could threads interleave
+    // between getting the inner hashmap and updating/creating it?
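// An illustrative aside on the TODO above (placeholder names; not part of the surrounding
// class): if both layers of the map are ConcurrentHashMaps, each computeIfAbsent call is
// atomic for its own key, and the second call runs against the shared inner-map reference
// returned by the first, so two threads racing on the same instance URL and pool key still
// observe a single pool. The remaining hazard is a removal path deleting the inner map
// between the two calls, so removals (e.g. on NODE_DELETED) may still warrant a re-check
// or coarser locking.
class PoolMapSketch {
  private final Map<String, Map<String, HikariDataSource>> pools = new ConcurrentHashMap<>();

  HikariDataSource getOrCreate(String instanceUrl, String poolKey,
      java.util.function.Supplier<HikariDataSource> poolFactory) {
    return pools
        .computeIfAbsent(instanceUrl, url -> new ConcurrentHashMap<>()) // atomic per URL
        .computeIfAbsent(poolKey, key -> poolFactory.get()); // atomic per pool key
  }
}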
+    final Map<String, HikariDataSource> hostPools = databasePools.computeIfAbsent(
+        hostSpec.getUrl(),
+        url -> new ConcurrentHashMap<>()
     );
+    final String poolKey = getPoolKey(hostSpec, props);
+    final HikariDataSource ds = hostPools.computeIfAbsent(
+        poolKey,
+        lambdaPoolKey -> createHikariDataSource(protocol, hostSpec, props)
+    );
     ds.setPassword(props.getProperty(PropertyDefinition.PASSWORD.name));
-    Connection conn = ds.getConnection();
-    int count = 0;
-    while (conn != null && count++ < retries && !conn.isValid(3)) {
-      ds.evictConnection(conn);
-      conn = ds.getConnection();
+
+    Connection conn = null;
+    SQLException latestConnectException = null;
+    for (int i = 0; i < retries + 1; i++) {
+      try {
+        conn = ds.getConnection();
+        if (conn == null || conn.isValid(3)) {
+          break;
+        }
+        // The connection is stale; evict it and try again.
+        ds.evictConnection(conn);
+        conn = null;
+      } catch (SQLException e) {
+        latestConnectException = e;
+        if (conn != null) {
+          ds.evictConnection(conn);
+          conn = null;
+        }
+      }
+    }
+
+    if (conn != null) {
+      return conn;
+    }
+
+    // Data source is not working; discard it so a fresh one is created on the next attempt.
+    ds.close();
+    hostPools.remove(poolKey);
+    if (hostPools.size() == 0) {
+      databasePools.remove(hostSpec.getUrl());
+    }
+
+    // TODO: when this is not the initial connection, we could try to connect to another
+    // instance instead of failing immediately.
+    String errorMessage = latestConnectException == null
+        ? Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSource",
+            new Object[]{hostSpec.getUrl()})
+        : Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSourceWithCause",
+            new Object[] {hostSpec.getUrl(), latestConnectException.getMessage()});
+    throw new SQLException(errorMessage);
   }
 
   @Override
-  public Connection connect(
-      @NonNull String url, @NonNull Properties props) throws SQLException {
+  public Connection connect(@NonNull String url, @NonNull Properties props) throws SQLException {
     // This method is only called by tests/benchmarks
     return null;
   }
@@ -150,9 +239,36 @@ private String getPoolKey(HostSpec hostSpec, Properties props) {
     return sb.toString();
   }
 
+  @Override
+  public void notifyNodeListChanged(Map<String, EnumSet<NodeChangeOptions>> changes) {
+    for (Map.Entry<String, EnumSet<NodeChangeOptions>> hostChangesEntry : changes.entrySet()) {
+      final EnumSet<NodeChangeOptions> hostChanges = hostChangesEntry.getValue();
+      // TODO: do any other node changes need to be checked and processed?
+      if (!hostChanges.contains(NodeChangeOptions.NODE_DELETED)) {
+        continue;
+      }
+
+      final String hostUrl = hostChangesEntry.getKey();
+      final Map<String, HikariDataSource> hostPools = databasePools.get(hostUrl);
+      if (hostPools != null) {
+        for (Map.Entry<String, HikariDataSource> poolEntry : hostPools.entrySet()) {
+          final HikariDataSource ds = poolEntry.getValue();
+          if (ds != null) {
+            ds.close();
+          }
+        }
+        hostPools.clear();
+      }
+      databasePools.remove(hostUrl);
+    }
+  }
+
   @Override
   public void releaseResources() {
-    databasePools.forEach((String url, HikariDataSource ds) -> ds.close());
+    databasePools.forEach((url, hostPools) -> {
+      hostPools.forEach((poolKey, ds) -> ds.close());
+      hostPools.clear();
+    });
     databasePools.clear();
   }
@@ -208,14 +324,27 @@ public int getHostCount() {
   }
 
   /**
-   * Returns a set containing every key associated with an active connection pool.
+   * Returns a set containing every host URL for which there are one or more connection pool(s).
* - * @return a set containing every key associated with an active connection pool + * @return a set containing every host URL for which there are one or more connection pool(s). */ public Set getHosts() { return Collections.unmodifiableSet(databasePools.keySet()); } + /** + * Returns a set containing every key associated with an active connection pool. + * + * @return a set containing every key associated with an active connection pool + */ + public Set getKeys() { + Set keys = new HashSet<>(); + for (Map pool : databasePools.values()) { + keys.addAll(pool.keySet()); + } + return keys; + } + /** * Logs information for every active connection pool. */ diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index cdb5ca5e8..7704a9128 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -41,7 +41,6 @@ import software.amazon.jdbc.OldConnectionSuggestedAction; import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; -import software.amazon.jdbc.util.DriverInfo; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.SqlMethodAnalyzer; import software.amazon.jdbc.util.WrapperUtils; @@ -146,13 +145,18 @@ public Connection connect( // It's guaranteed that this plugin is always the last in plugin chain so connectFunc can be // ignored. - return connectInternal(driverProtocol, hostSpec, props, connProvider); + return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connProvider); } private Connection connectInternal( - String driverProtocol, HostSpec hostSpec, Properties props, ConnectionProvider connProvider) + String driverProtocol, + HostSpec hostSpec, + Properties props, + boolean isInitialConnection, + ConnectionProvider connProvider) throws SQLException { - final Connection conn = connProvider.connect(driverProtocol, this.pluginService.getDialect(), hostSpec, props); + final Connection conn = connProvider.connect( + driverProtocol, this.pluginService.getDialect(), hostSpec, props, isInitialConnection); this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE); this.pluginService.updateDialect(conn); @@ -172,17 +176,26 @@ public Connection forceConnect( // It's guaranteed that this plugin is always the last in plugin chain so forceConnectFunc can be // ignored. - return connectInternal(driverProtocol, hostSpec, props, connProvider); + return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connProvider); } @Override public boolean acceptsStrategy(HostRole role, String strategy) { + if (HostRole.UNKNOWN.equals(role)) { + // Users must request either a writer or a reader role. + return false; + } return this.connProviderManager.acceptsStrategy(role, strategy); } @Override public HostSpec getHostSpecByStrategy(HostRole role, String strategy) throws SQLException { + if (HostRole.UNKNOWN.equals(role)) { + // Users must request either a writer or a reader role. 
+      throw new SQLException(Messages.get("DefaultConnectionPlugin.unknownRoleRequested"));
+    }
+
     List<HostSpec> hosts = this.pluginService.getHosts();
     if (hosts.size() < 1) {
       throw new SQLException(Messages.get("DefaultConnectionPlugin.noHostsAvailable"));
     }
@@ -213,7 +226,7 @@ public OldConnectionSuggestedAction notifyConnectionChanged(
 
   @Override
   public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>> changes) {
-    // do nothing
+    this.connProviderManager.notifyNodeListChanged(changes);
   }
 
   List<String> parseMultiStatementQueries(String query) {
diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
index e108780b9..fbeb5bd08 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java
@@ -22,6 +22,7 @@
 import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 import java.util.logging.Logger;
@@ -51,6 +52,7 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin
           add("initHostProvider");
           add("connect");
           add("notifyConnectionChanged");
+          add("notifyNodeListChanged");
           add("Connection.setReadOnly");
         }
       });
@@ -62,6 +64,7 @@ public class ReadWriteSplittingPlugin
   private volatile boolean inReadWriteSplit = false;
   private HostListProviderService hostListProviderService;
   private Connection writerConnection;
+  private HostSpec writerHostSpec;
   private Connection readerConnection;
   private HostSpec readerHostSpec;
 
@@ -177,6 +180,36 @@ public OldConnectionSuggestedAction notifyConnectionChanged(
     return OldConnectionSuggestedAction.NO_OPINION;
   }
 
+  @Override
+  public void notifyNodeListChanged(final Map<String, EnumSet<NodeChangeOptions>> changes) {
+    for (Map.Entry<String, EnumSet<NodeChangeOptions>> entry : changes.entrySet()) {
+      EnumSet<NodeChangeOptions> hostChanges = entry.getValue();
+      if (!hostChanges.contains(NodeChangeOptions.NODE_DELETED)) {
+        continue;
+      }
+
+      if (writerHostSpec != null && writerHostSpec.getUrl().equals(entry.getKey())) {
+        if (writerConnection != null) {
+          try {
+            writerConnection.close();
+          } catch (SQLException e) {
+            // Do nothing
+          }
+        }
+        writerConnection = null;
+        writerHostSpec = null;
+      }
+
+      if (readerHostSpec != null && readerHostSpec.getUrl().equals(entry.getKey())) {
+        if (readerConnection != null) {
+          try {
+            readerConnection.close();
+          } catch (SQLException e) {
+            // Do nothing
+          }
+        }
+        readerConnection = null;
+        readerHostSpec = null;
+      }
+    }
+  }
+
   @Override
   public <T, E extends Exception> T execute(
       final Class<T> resultClass,
@@ -250,6 +283,7 @@ private void getNewWriterConnection(final HostSpec writerHostSpec) throws SQLExc
 
   private void setWriterConnection(final Connection writerConnection,
       final HostSpec writerHostSpec) {
     this.writerConnection = writerConnection;
+    this.writerHostSpec = writerHostSpec;
     LOGGER.finest(
         () -> Messages.get(
             "ReadWriteSplittingPlugin.setWriterConnection",
@@ -507,10 +541,12 @@ private void closeConnectionIfIdle(final Connection internalConnection) {
         internalConnection.close();
         if (internalConnection == writerConnection) {
           writerConnection = null;
+          writerHostSpec = null;
         }
 
         if (internalConnection == readerConnection) {
           readerConnection = null;
+          readerHostSpec = null;
         }
       }
     } catch (final SQLException e) {
diff --git a/wrapper/src/main/resources/messages.properties b/wrapper/src/main/resources/messages.properties
index f97cb44b4..6002821b4 100644
--- a/wrapper/src/main/resources/messages.properties
+++
b/wrapper/src/main/resources/messages.properties @@ -110,6 +110,7 @@ DataCacheConnectionPlugin.queryResultsCached=[{0}] Query results will be cached: # Default Connection Plugin DefaultConnectionPlugin.executingMethod=Executing method: ''{0}'' DefaultConnectionPlugin.noHostsAvailable=The default connection plugin received an empty host list from the plugin service. +DefaultConnectionPlugin.unknownRoleRequested=A HostSpec with a role of HostRole.UNKNOWN was requested via getHostSpecByStrategy. The requested role must be either HostRole.WRITER or HostRole.READER # Driver Driver.nullUrl=Url is null. @@ -138,6 +139,10 @@ Failover.invalidNode=Node is no longer available in the topology: {0} Failover.failedToUpdateCurrentHostspecAvailability=Failed to update current hostspec availability. Failover.noOperationsAfterConnectionClosed=No operations allowed after connection closed. +# HikariPooledConnectionProvider +HikariPooledConnectionProvider.errorConnectingWithDataSource=Unable to connect to ''{0}'' using the Hikari data source. +HikariPooledConnectionProvider.errorConnectingWithDataSourceWithCause=Unable to connect to ''{0}'' using the Hikari data source. Exception message: ''{1}'' + # Host Monitoring Connection Plugin HostMonitoringConnectionPlugin.activatedMonitoring=Executing method ''{0}'', monitoring is activated. HostMonitoringConnectionPlugin.monitoringDeactivated=Monitoring deactivated for method ''{0}''. @@ -145,6 +150,9 @@ HostMonitoringConnectionPlugin.unavailableNode=Node ''{0}'' is unavailable. HostMonitoringConnectionPlugin.errorIdentifyingConnection=Error occurred while identifying connection: ''{0}''. HostMonitoringConnectionPlugin.unableToIdentifyConnection=Unable to identify the given connection: ''{0}'', please ensure the correct host list provider is specified. The host list provider in use is: ''{1}''. +# HostSelector +HostSelector.noHostsMatchingRole=No hosts were found matching the requested ''{0}'' role. + # IAM Auth Connection Plugin IamAuthConnectionPlugin.unsupportedHostname=Unsupported AWS hostname {0}. Amazon domain name in format *.AWS-Region.rds.amazonaws.com or *.rds.AWS-Region.amazonaws.com.cn is expected. IamAuthConnectionPlugin.useCachedIamToken=Use cached IAM token = ''{0}'' @@ -185,9 +193,6 @@ PropertyUtils.setMethodDoesNotExistOnTarget=Set method for property ''{0}'' does PropertyUtils.failedToSetProperty=Failed to set property ''{0}'' on target ''{1}''. PropertyUtils.failedToSetPropertyWithReason=Failed to set property ''{0}'' on target ''{1}''. {2} -# RandomHostSelector -RandomHostSelector.noHostsMatchingRole=No hosts were found matching the requested ''{0}'' role. - # Read Write Splitting Plugin ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection=setReadOnly cannot be called on a closed connection. ReadWriteSplittingPlugin.errorSwitchingToReader=An error occurred while trying to switch to a reader connection. 
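Taken together, the pieces above are wired up from application code roughly as follows. This is a sketch based on the AutoscalingTests added later in this patch; the JDBC URL, credentials, and pool size are illustrative placeholders, while the provider, manager, and property APIs are the ones introduced or exercised in this change.

import com.zaxxer.hikari.HikariConfig;
import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;
import software.amazon.jdbc.ConnectionProviderManager;
import software.amazon.jdbc.HikariPooledConnectionProvider;
import software.amazon.jdbc.PropertyDefinition;
import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingPlugin;

public class LeastConnectionsExample {
  public static void main(String[] args) throws Exception {
    // Install the pooled provider; each (instance URL, pool key) pair gets its own Hikari pool.
    // The configurator receives the HostSpec and Properties, which this sketch ignores.
    HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider(
        (hostSpec, props) -> {
          HikariConfig config = new HikariConfig();
          config.setMaximumPoolSize(10); // illustrative pool size
          return config;
        });
    ConnectionProviderManager.setConnectionProvider(provider);

    Properties props = new Properties();
    PropertyDefinition.PLUGINS.set(props, "auroraHostList,readWriteSplitting");
    // Route setReadOnly(true) to the reader with the fewest active pooled connections.
    ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections");
    props.setProperty(PropertyDefinition.USER.name, "dbUser");         // placeholder
    props.setProperty(PropertyDefinition.PASSWORD.name, "dbPassword"); // placeholder

    try (Connection conn = DriverManager.getConnection(
        "jdbc:aws-wrapper:postgresql://mydb.cluster-xyz.us-east-2.rds.amazonaws.com:5432/mydb",
        props)) {
      conn.setReadOnly(true); // selects a reader via the leastConnections strategy
      // ... run read-only work against the least loaded reader ...
    } finally {
      ConnectionProviderManager.releaseResources();
      ConnectionProviderManager.resetProvider();
    }
  }
}

Note that setConnectionProvider installs the pooled provider process-wide, which is why the tests below release its resources and reset the provider in a finally block.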
diff --git a/wrapper/src/test/java/integration/refactored/TestEnvironmentFeatures.java b/wrapper/src/test/java/integration/refactored/TestEnvironmentFeatures.java index 1b9b7e4e4..aabd5ffe2 100644 --- a/wrapper/src/test/java/integration/refactored/TestEnvironmentFeatures.java +++ b/wrapper/src/test/java/integration/refactored/TestEnvironmentFeatures.java @@ -28,4 +28,5 @@ public enum TestEnvironmentFeatures { SKIP_PG_DRIVER_TESTS, SKIP_MARIADB_DRIVER_TESTS, RUN_HIBERNATE_TESTS_ONLY, + RUN_AUTOSCALING_TESTS_ONLY } diff --git a/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java b/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java index 363c861f5..b4e4e1d6f 100644 --- a/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java +++ b/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java @@ -30,6 +30,9 @@ public class TestEnvironmentInfo { private TestDatabaseInfo databaseInfo; private TestProxyDatabaseInfo proxyDatabaseInfo; + private String databaseEngine; + private String databaseEngineVersion; + private String databaseInstanceClass; public TestDatabaseInfo getDatabaseInfo() { return this.databaseInfo; @@ -39,6 +42,18 @@ public TestProxyDatabaseInfo getProxyDatabaseInfo() { return this.proxyDatabaseInfo; } + public String getDatabaseEngine() { + return databaseEngine; + } + + public String getDatabaseEngineVersion() { + return databaseEngineVersion; + } + + public String getDatabaseInstanceClass() { + return databaseInstanceClass; + } + public TestEnvironmentRequest getRequest() { return this.request; } @@ -87,6 +102,18 @@ public void setProxyDatabaseInfo(TestProxyDatabaseInfo proxyDatabaseInfo) { this.proxyDatabaseInfo = proxyDatabaseInfo; } + public void setDatabaseEngine(String databaseEngine) { + this.databaseEngine = databaseEngine; + } + + public void setDatabaseEngineVersion(String databaseEngineVersion) { + this.databaseEngineVersion = databaseEngineVersion; + } + + public void setDatabaseInstanceClass(String databaseInstanceClass) { + this.databaseInstanceClass = databaseInstanceClass; + } + public void setAwsAccessKeyId(String awsAccessKeyId) { this.awsAccessKeyId = awsAccessKeyId; } diff --git a/wrapper/src/test/java/integration/refactored/TestInstanceInfo.java b/wrapper/src/test/java/integration/refactored/TestInstanceInfo.java index 7b0a68c7a..54de471a2 100644 --- a/wrapper/src/test/java/integration/refactored/TestInstanceInfo.java +++ b/wrapper/src/test/java/integration/refactored/TestInstanceInfo.java @@ -23,32 +23,36 @@ public class TestInstanceInfo { private String instanceId; // "instance-1" - private String endpoint; // "instance-1.ABC.cluster-XYZ.us-west-2.rds.amazonaws.com" - private int endpointPort; + private String host; // "instance-1.ABC.cluster-XYZ.us-west-2.rds.amazonaws.com" + private int port; // This constructor should NOT be used in the code. It's required for serialization. 
public TestInstanceInfo() { } - public TestInstanceInfo(String instanceId, String endpoint, int endpointPort) { + public TestInstanceInfo(String instanceId, String host, int port) { this.instanceId = instanceId; - this.endpoint = endpoint; - this.endpointPort = endpointPort; + this.host = host; + this.port = port; } public String getInstanceId() { return this.instanceId; } - public String getEndpoint() { - return this.endpoint; + public String getHost() { + return this.host; } - public String getUrl() { - return getEndpoint() + ":" + getEndpointPort(); + public int getPort() { + return this.port; } - public int getEndpointPort() { - return this.endpointPort; + public String getUrl() { + String url = host + ":" + port; + if (!url.endsWith("/")) { + url += "/"; + } + return url; } } diff --git a/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java b/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java index bc0045875..8eac098f8 100644 --- a/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java +++ b/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java @@ -18,6 +18,7 @@ import integration.refactored.DatabaseEngine; import integration.refactored.DriverHelper; +import integration.refactored.TestInstanceInfo; import java.util.Properties; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.util.StringUtils; @@ -36,13 +37,13 @@ public static String getUrl(final String wrapperPlugins) { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName(), wrapperPlugins); } @@ -84,16 +85,25 @@ public static String getWrapperUrl() { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); } + public static String getWrapperUrl(TestInstanceInfo instance) { + return getWrapperUrl( + TestEnvironment.getCurrent().getCurrentDriver(), + instance.getHost(), + instance.getPort(), + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); + } + + public static String getWrapperUrl(String host, int port, String databaseName) { return getWrapperUrl(TestEnvironment.getCurrent().getCurrentDriver(), host, port, databaseName); } @@ -133,13 +143,13 @@ public static String getProxyWrapperUrl() { .getProxyDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), TestEnvironment.getCurrent() .getInfo() .getProxyDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getDefaultDbName()); } diff --git a/wrapper/src/test/java/integration/refactored/container/TestDriverProvider.java b/wrapper/src/test/java/integration/refactored/container/TestDriverProvider.java index 1ede1e1fe..0bc81bd11 100644 --- a/wrapper/src/test/java/integration/refactored/container/TestDriverProvider.java +++ b/wrapper/src/test/java/integration/refactored/container/TestDriverProvider.java @@ -151,12 +151,12 @@ public void beforeEach(ExtensionContext context) throws Exception { startTimeNano = System.nanoTime(); String clusterInetAddress = auroraUtil.hostToIP(dbInfo.getClusterEndpoint()); 
String writerInetAddress = - auroraUtil.hostToIP(dbInfo.getInstances().get(0).getEndpoint()); + auroraUtil.hostToIP(dbInfo.getInstances().get(0).getHost()); while (!writerInetAddress.equals(clusterInetAddress) && TimeUnit.NANOSECONDS.toMinutes(System.nanoTime() - startTimeNano) < 5) { clusterInetAddress = auroraUtil.hostToIP(dbInfo.getClusterEndpoint()); writerInetAddress = - auroraUtil.hostToIP(dbInfo.getInstances().get(0).getEndpoint()); + auroraUtil.hostToIP(dbInfo.getInstances().get(0).getHost()); Thread.sleep(5000); } assertTrue(writerInetAddress.equals(clusterInetAddress)); diff --git a/wrapper/src/test/java/integration/refactored/container/TestEnvironment.java b/wrapper/src/test/java/integration/refactored/container/TestEnvironment.java index a8b668d79..ec7e56025 100644 --- a/wrapper/src/test/java/integration/refactored/container/TestEnvironment.java +++ b/wrapper/src/test/java/integration/refactored/container/TestEnvironment.java @@ -83,7 +83,7 @@ private static void initProxies(TestEnvironment environment) { int proxyControlPort = environment.info.getProxyDatabaseInfo().getControlPort(); for (TestInstanceInfo instance : environment.info.getProxyDatabaseInfo().getInstances()) { - ToxiproxyClient client = new ToxiproxyClient(instance.getEndpoint(), proxyControlPort); + ToxiproxyClient client = new ToxiproxyClient(instance.getHost(), proxyControlPort); List proxies; try { proxies = client.getProxies(); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AdvancedPerformanceTest.java b/wrapper/src/test/java/integration/refactored/container/tests/AdvancedPerformanceTest.java index 6787b1aea..12999354e 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AdvancedPerformanceTest.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AdvancedPerformanceTest.java @@ -234,7 +234,7 @@ private void ensureDnsHealthy() throws UnknownHostException, InterruptedExceptio .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint()) + .getHost()) .getHostAddress(); LOGGER.finest("Writer resolves to " + writerIpAddress); LOGGER.finest( diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AuroraFailoverTest.java b/wrapper/src/test/java/integration/refactored/container/tests/AuroraFailoverTest.java index 2797f1cca..7c77b63ba 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AuroraFailoverTest.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AuroraFailoverTest.java @@ -64,7 +64,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) @EnableOnTestFeature(TestEnvironmentFeatures.FAILOVER_SUPPORTED) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) @EnableOnNumOfInstances(min = 2) @MakeSureFirstInstanceWriter public class AuroraFailoverTest { @@ -102,8 +105,8 @@ public void test_failFromWriterToNewWriter_failOnConnectionInvocation() try (final Connection conn = DriverManager.getConnection( ConnectionStringHelper.getWrapperUrl( - initialWriterInstanceInfo.getEndpoint(), - initialWriterInstanceInfo.getEndpointPort(), + initialWriterInstanceInfo.getHost(), + initialWriterInstanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), props)) { @@ -138,8 +141,8 @@ public void 
test_failFromWriterToNewWriter_failOnConnectionBoundObjectInvocation try (final Connection conn = DriverManager.getConnection( ConnectionStringHelper.getWrapperUrl( - initialWriterInstanceInfo.getEndpoint(), - initialWriterInstanceInfo.getEndpointPort(), + initialWriterInstanceInfo.getHost(), + initialWriterInstanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), props)) { @@ -174,8 +177,8 @@ public void test_failFromReaderToWriter() throws SQLException { try (final Connection conn = DriverManager.getConnection( ConnectionStringHelper.getWrapperUrl( - instanceInfo.getEndpoint(), - instanceInfo.getEndpointPort(), + instanceInfo.getHost(), + instanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getDefaultDbName()), props)) { @@ -208,8 +211,8 @@ public void test_writerFailWithinTransaction_setAutoCommitFalse() try (final Connection conn = DriverManager.getConnection( ConnectionStringHelper.getWrapperUrl( - initialWriterInstanceInfo.getEndpoint(), - initialWriterInstanceInfo.getEndpointPort(), + initialWriterInstanceInfo.getHost(), + initialWriterInstanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), props)) { @@ -267,8 +270,8 @@ public void test_writerFailWithinTransaction_startTransaction() try (final Connection conn = DriverManager.getConnection( ConnectionStringHelper.getWrapperUrl( - initialWriterInstanceInfo.getEndpoint(), - initialWriterInstanceInfo.getEndpointPort(), + initialWriterInstanceInfo.getHost(), + initialWriterInstanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), props)) { @@ -383,7 +386,7 @@ public void test_DataSourceWriterConnection_BasicFailover() try (final Connection conn = createDataSourceConnectionWithFailoverUsingInstanceId( - initialWriterInstanceInfo.getEndpoint())) { + initialWriterInstanceInfo.getHost())) { // Trigger failover auroraUtil.failoverClusterToATargetAndWaitUntilWriterChanged( @@ -400,7 +403,7 @@ public void test_DataSourceWriterConnection_BasicFailover() final String nextWriterId = instanceIDs.get(0); LOGGER.fine("currentConnectionObject: " + conn.unwrap(Connection.class)); - LOGGER.fine("initialWriterInstanceInfo endpoint: " + initialWriterInstanceInfo.getEndpoint()); + LOGGER.fine("initialWriterInstanceInfo endpoint: " + initialWriterInstanceInfo.getHost()); LOGGER.fine("currentConnectionId: " + currentConnectionId); LOGGER.fine("nextWriterId: " + nextWriterId); LOGGER.fine("nominatedWriterId: " + nominatedWriterId); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java new file mode 100644 index 000000000..008e8d5e4 --- /dev/null +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -0,0 +1,279 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package integration.refactored.container.tests; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import com.zaxxer.hikari.HikariConfig; +import integration.refactored.DatabaseEngineDeployment; +import integration.refactored.DriverHelper; +import integration.refactored.TestEnvironmentFeatures; +import integration.refactored.TestEnvironmentInfo; +import integration.refactored.TestInstanceInfo; +import integration.refactored.container.ConnectionStringHelper; +import integration.refactored.container.TestDriverProvider; +import integration.refactored.container.TestEnvironment; +import integration.refactored.container.condition.EnableOnDatabaseEngineDeployment; +import integration.refactored.container.condition.EnableOnNumOfInstances; +import integration.refactored.container.condition.EnableOnTestFeature; +import integration.util.AuroraTestUtility; +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; +import java.util.logging.Logger; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.api.TestTemplate; +import org.junit.jupiter.api.extension.ExtendWith; +import software.amazon.jdbc.ConnectionProviderManager; +import software.amazon.jdbc.HikariPoolConfigurator; +import software.amazon.jdbc.HikariPooledConnectionProvider; +import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; +import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; +import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingPlugin; + +@TestMethodOrder(MethodOrderer.MethodName.class) +@ExtendWith(TestDriverProvider.class) +@EnableOnTestFeature(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) +@EnableOnDatabaseEngineDeployment({DatabaseEngineDeployment.AURORA}) +@EnableOnNumOfInstances(min = 5) +public class AutoscalingTests { + protected static final AuroraTestUtility auroraUtil = + new AuroraTestUtility(TestEnvironment.getCurrent().getInfo().getAuroraRegion()); + private static final Logger LOGGER = Logger.getLogger(AutoscalingTests.class.getName()); + + protected static Properties getProxiedProps() { + final Properties props = getProps(); + AuroraHostListProvider.CLUSTER_INSTANCE_HOST_PATTERN.set(props, + "?." 
+ TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo() + .getInstanceEndpointSuffix()); + return props; + } + + protected static Properties getDefaultPropsNoPlugins() { + final Properties props = ConnectionStringHelper.getDefaultProperties(); + DriverHelper.setSocketTimeout(props, 10, TimeUnit.SECONDS); + DriverHelper.setConnectTimeout(props, 10, TimeUnit.SECONDS); + return props; + } + + protected static Properties getProps() { + final Properties props = getDefaultPropsNoPlugins(); + PropertyDefinition.PLUGINS.set(props, "auroraHostList,readWriteSplitting"); + return props; + } + + protected static Properties getPropsWithFailover() { + final Properties props = getDefaultPropsNoPlugins(); + PropertyDefinition.PLUGINS.set(props, "readWriteSplitting,failover,efm"); + return props; + } + + protected HikariPoolConfigurator getHikariConfig(int maxPoolSize) { + return (hostSpec, props) -> { + final HikariConfig config = new HikariConfig(); + config.setMaximumPoolSize(maxPoolSize); + config.setInitializationFailTimeout(75000); + config.setKeepaliveTime(30000); + return config; + }; + } + + // Old connection detects deleted instance when setReadOnly(true) is called + @TestTemplate + public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() + throws SQLException, InterruptedException { + final Properties props = getProps(); + final long topologyRefreshRateMs = 5000; + ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); + AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, + Long.toString(topologyRefreshRateMs)); + + final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); + final List instances = testInfo.getDatabaseInfo().getInstances(); + final HikariPooledConnectionProvider provider = + new HikariPooledConnectionProvider(getHikariConfig(instances.size())); + ConnectionProviderManager.setConnectionProvider(provider); + + final List connections = new ArrayList<>(); + try { + for (int i = 1; i < instances.size(); i++) { + final String connString = ConnectionStringHelper.getWrapperUrl(instances.get(i)); + final Connection conn = DriverManager.getConnection(connString, props); + connections.add(conn); + } + + final Connection newInstanceConn; + final TestInstanceInfo newInstance = + auroraUtil.createInstance("auto-scaling-instance"); + try { + newInstanceConn = + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), props); + connections.add(newInstanceConn); + Thread.sleep(topologyRefreshRateMs); + newInstanceConn.setReadOnly(true); + final String readerId = auroraUtil.queryInstanceId(newInstanceConn); + + assertEquals(newInstance.getInstanceId(), readerId); + // Verify that there is a pool for the new instance + assertTrue(provider.getHosts().stream() + .anyMatch((url) -> url.equals(newInstance.getUrl()))); + newInstanceConn.setReadOnly(false); + } finally { + auroraUtil.deleteInstance(newInstance); + } + + // Should detect that the reader was deleted and remove the associated pool + newInstanceConn.setReadOnly(true); + assertFalse(provider.getHosts().stream() + .anyMatch((url) -> url.equals(newInstance.getUrl()))); + } finally { + for (Connection connection : connections) { + connection.close(); + } + ConnectionProviderManager.releaseResources(); + ConnectionProviderManager.resetProvider(); + } + } + + // User tries to directly connect to the deleted instance URL + // TODO: Hikari throws an NPE when closing the connection to the instance that is now deleted + @TestTemplate + public void 
test_pooledConnectionAutoScaling_newConnectionToDeletedReader() + throws SQLException, InterruptedException { + final Properties props = getProps(); + final long topologyRefreshRateMs = 5000; + ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); + AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, + Long.toString(topologyRefreshRateMs)); + + final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); + final List instances = testInfo.getDatabaseInfo().getInstances(); + final HikariPooledConnectionProvider provider = + new HikariPooledConnectionProvider(getHikariConfig(instances.size() * 5)); + ConnectionProviderManager.setConnectionProvider(provider); + + final List connections = new ArrayList<>(); + try { + for (int i = 1; i < instances.size(); i++) { + final String connString = ConnectionStringHelper.getWrapperUrl(instances.get(i)); + final Connection conn1 = DriverManager.getConnection(connString, props); + connections.add(conn1); + final Connection conn2 = DriverManager.getConnection(connString, props); + connections.add(conn2); + } + + final Connection newInstanceConn; + final TestInstanceInfo newInstance = + auroraUtil.createInstance("auto-scaling-instance"); + try { + newInstanceConn = + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), props); + connections.add(newInstanceConn); + Thread.sleep(topologyRefreshRateMs); + newInstanceConn.setReadOnly(true); + final String readerId = auroraUtil.queryInstanceId(newInstanceConn); + + assertEquals(newInstance.getInstanceId(), readerId); + // Verify that there is a pool for the new instance + assertTrue(provider.getHosts().stream() + .anyMatch((url) -> url.equals(newInstance.getUrl()))); + } finally { + auroraUtil.deleteInstance(newInstance); + } + + Thread.sleep(topologyRefreshRateMs); + assertThrows(SQLException.class, () -> + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(newInstance), props)); + + // Verify that the pool for the deleted instance is now removed + assertFalse(provider.getHosts().stream() + .anyMatch((url) -> url.equals(newInstance.getUrl()))); + } finally { + for (Connection connection : connections) { + // Hikari throws NPE when closing the connection to the deleted instance + connection.close(); + } + ConnectionProviderManager.releaseResources(); + ConnectionProviderManager.resetProvider(); + } + } + + // Attempt to use a connection to an instance that is now deleted, failover plugin is loaded + @TestTemplate + public void test_pooledConnectionAutoScaling_failoverFromDeletedReader() + throws SQLException, InterruptedException { + final Properties props = getPropsWithFailover(); + final long topologyRefreshRateMs = 5000; + ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); + AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, + Long.toString(topologyRefreshRateMs)); + + final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); + final List instances = testInfo.getDatabaseInfo().getInstances(); + final HikariPooledConnectionProvider provider = + new HikariPooledConnectionProvider(getHikariConfig(instances.size() * 5)); + ConnectionProviderManager.setConnectionProvider(provider); + + final List connections = new ArrayList<>(); + try { + for (int i = 1; i < instances.size(); i++) { + final String connString = ConnectionStringHelper.getWrapperUrl(instances.get(i)); + final Connection conn1 = DriverManager.getConnection(connString, props); + 
connections.add(conn1); + final Connection conn2 = DriverManager.getConnection(connString, props); + connections.add(conn2); + } + + final Connection newInstanceConn; + final TestInstanceInfo newInstance = + auroraUtil.createInstance("auto-scaling-instance"); + try { + newInstanceConn = + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(newInstance), props); + connections.add(newInstanceConn); + newInstanceConn.setReadOnly(true); + final String readerId = auroraUtil.queryInstanceId(newInstanceConn); + assertEquals(newInstance.getInstanceId(), readerId); + // Verify that there is a pool for the new instance + assertTrue(provider.getHosts().stream() + .anyMatch((url) -> url.equals(newInstance.getUrl()))); + } finally { + auroraUtil.deleteInstance(newInstance); + } + + auroraUtil.assertFirstQueryThrows(newInstanceConn, FailoverSuccessSQLException.class); + String newReaderId = auroraUtil.queryInstanceId(newInstanceConn); + assertNotEquals(newInstance.getInstanceId(), newReaderId); + } finally { + for (Connection connection : connections) { + connection.close(); + } + ConnectionProviderManager.releaseResources(); + ConnectionProviderManager.resetProvider(); + } + } +} diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AwsIamIntegrationTest.java b/wrapper/src/test/java/integration/refactored/container/tests/AwsIamIntegrationTest.java index 2767e988d..cc6541625 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AwsIamIntegrationTest.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AwsIamIntegrationTest.java @@ -47,7 +47,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) @EnableOnTestFeature(TestEnvironmentFeatures.IAM) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) // MariaDb driver has no configuration parameters to force using 'mysql_clear_password' // authentication that is essential for IAM. A proper user name and IAM token are passed to MariaDb // driver however 'mysql_native_password' authentication is chosen by default. 
@@ -105,7 +108,7 @@ public void test_AwsIam_UsingIPAddress() throws UnknownHostException, SQLExcepti .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); props.setProperty( "iamHost", TestEnvironment.getCurrent() @@ -113,7 +116,7 @@ public void test_AwsIam_UsingIPAddress() throws UnknownHostException, SQLExcepti .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); final Connection conn = DriverManager.getConnection( @@ -124,7 +127,7 @@ public void test_AwsIam_UsingIPAddress() throws UnknownHostException, SQLExcepti .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()), props); Assertions.assertDoesNotThrow(conn::close); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/BasicConnectivityTests.java b/wrapper/src/test/java/integration/refactored/container/tests/BasicConnectivityTests.java index f2b7ea5ad..9b2227f1c 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/BasicConnectivityTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/BasicConnectivityTests.java @@ -55,7 +55,10 @@ import software.amazon.jdbc.wrapper.ConnectionWrapper; @TestMethodOrder(MethodOrderer.MethodName.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) public class BasicConnectivityTests { private static final Logger LOGGER = Logger.getLogger(BasicConnectivityTests.class.getName()); @@ -77,13 +80,13 @@ public void test_DirectConnection(TestDriver testDriver) throws SQLException { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName(), ""); LOGGER.finest("Connecting to " + url); @@ -123,13 +126,13 @@ public void test_WrapperConnection(TestDriver testDriver) throws SQLException { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort(), + .getPort(), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); LOGGER.finest("Connecting to " + url); @@ -168,8 +171,8 @@ public void test_ProxiedDirectConnection(TestDriver testDriver) throws SQLExcept String url = ConnectionStringHelper.getUrl( testDriver, - instanceInfo.getEndpoint(), - instanceInfo.getEndpointPort(), + instanceInfo.getHost(), + instanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getDefaultDbName(), ""); LOGGER.finest("Connecting to " + url); @@ -213,8 +216,8 @@ public void test_ProxiedWrapperConnection() throws SQLException { String url = ConnectionStringHelper.getWrapperUrl( - instanceInfo.getEndpoint(), - instanceInfo.getEndpointPort(), + instanceInfo.getHost(), + instanceInfo.getPort(), TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getDefaultDbName()); LOGGER.finest("Connecting to " + url); @@ -246,7 +249,7 @@ public void testSuccessOpenConnectionNoPort() throws SQLException { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint() + .getHost() + "/" + 
TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName() + DriverHelper.getDriverRequiredParameters(); @@ -340,14 +343,14 @@ private static Stream testConnectionParameters() { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), String.valueOf( TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort()), + .getPort()), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName(), DriverHelper.getDriverRequiredParameters(testDriver)))); @@ -362,14 +365,14 @@ private static Stream testConnectionParameters() { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), String.valueOf( TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort()), + .getPort()), "failedDatabaseNameTest", DriverHelper.getDriverRequiredParameters(testDriver)))); } @@ -391,14 +394,14 @@ private static Stream testPropertiesParameters() { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), String.valueOf( TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort()), + .getPort()), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName(), DriverHelper.getDriverRequiredParameters(testDriver)), "", @@ -415,14 +418,14 @@ private static Stream testPropertiesParameters() { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint(), + .getHost(), String.valueOf( TestEnvironment.getCurrent() .getInfo() .getDatabaseInfo() .getInstances() .get(0) - .getEndpointPort()), + .getPort()), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName(), DriverHelper.getDriverRequiredParameters(testDriver)), TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getUsername(), diff --git a/wrapper/src/test/java/integration/refactored/container/tests/DataCachePluginTests.java b/wrapper/src/test/java/integration/refactored/container/tests/DataCachePluginTests.java index 0bd401a28..93f0ebb05 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/DataCachePluginTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/DataCachePluginTests.java @@ -45,7 +45,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) @MakeSureFirstInstanceWriter public class DataCachePluginTests { diff --git a/wrapper/src/test/java/integration/refactored/container/tests/DataSourceTests.java b/wrapper/src/test/java/integration/refactored/container/tests/DataSourceTests.java index 82a513403..a1239ec9b 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/DataSourceTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/DataSourceTests.java @@ -49,7 +49,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) public class DataSourceTests { private static final Logger LOGGER = 
Logger.getLogger(DataSourceTests.class.getName()); @@ -73,7 +76,7 @@ public void testConnectionWithDataSourceClassNameAndServerNameFromJndiLookup() .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); targetDataSourceProps.setProperty( "databaseName", TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/DriverConfigurationProfileTests.java b/wrapper/src/test/java/integration/refactored/container/tests/DriverConfigurationProfileTests.java index d26b5120f..9d58f7044 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/DriverConfigurationProfileTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/DriverConfigurationProfileTests.java @@ -47,7 +47,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) public class DriverConfigurationProfileTests { @TestTemplate diff --git a/wrapper/src/test/java/integration/refactored/container/tests/HikariTests.java b/wrapper/src/test/java/integration/refactored/container/tests/HikariTests.java index 2def95a25..0e6cae8f5 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/HikariTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/HikariTests.java @@ -66,8 +66,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) @EnableOnTestFeature(TestEnvironmentFeatures.HIKARI) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, - TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) @MakeSureFirstInstanceWriter public class HikariTests { @@ -125,7 +127,7 @@ public void testOpenConnectionWithDataSourceClassName() throws SQLException { .getDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); targetDataSourceProps.setProperty( "databaseName", TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); @@ -266,7 +268,7 @@ private HikariConfig getConfig(final Properties customProps) { .getProxyDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); targetDataSourceProps.setProperty( "databaseName", TestEnvironment.getCurrent().getInfo().getProxyDatabaseInfo().getDefaultDbName()); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/LogQueryPluginTests.java b/wrapper/src/test/java/integration/refactored/container/tests/LogQueryPluginTests.java index 3e5fdcacb..3ee45b83b 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/LogQueryPluginTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/LogQueryPluginTests.java @@ -44,7 +44,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) public class 
LogQueryPluginTests { private static final Logger LOGGER = Logger.getLogger(LogQueryPluginTests.class.getName()); diff --git a/wrapper/src/test/java/integration/refactored/container/tests/PerformanceTest.java b/wrapper/src/test/java/integration/refactored/container/tests/PerformanceTest.java index 5d745f19b..78cfd2290 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/PerformanceTest.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/PerformanceTest.java @@ -374,7 +374,7 @@ private Connection openConnectionWithRetry(Properties props) { .getProxyDatabaseInfo() .getInstances() .get(0) - .getEndpoint()); + .getHost()); } return conn; } diff --git a/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java index 6d5b2730e..1c23e6d15 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java @@ -50,6 +50,7 @@ import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Statement; +import java.util.ArrayList; import java.util.List; import java.util.Properties; import java.util.concurrent.TimeUnit; @@ -60,8 +61,8 @@ import org.junit.jupiter.api.TestTemplate; import org.junit.jupiter.api.extension.ExtendWith; import software.amazon.jdbc.ConnectionProviderManager; +import software.amazon.jdbc.HikariPoolConfigurator; import software.amazon.jdbc.HikariPooledConnectionProvider; -import software.amazon.jdbc.HostSpec; import software.amazon.jdbc.PropertyDefinition; import software.amazon.jdbc.hostlistprovider.AuroraHostListProvider; import software.amazon.jdbc.hostlistprovider.ConnectionStringHostListProvider; @@ -69,13 +70,18 @@ import software.amazon.jdbc.plugin.failover.FailoverFailedSQLException; import software.amazon.jdbc.plugin.failover.FailoverSuccessSQLException; import software.amazon.jdbc.plugin.failover.TransactionStateUnknownSQLException; +import software.amazon.jdbc.plugin.readwritesplitting.ReadWriteSplittingPlugin; +import software.amazon.jdbc.util.PropertyUtils; import software.amazon.jdbc.util.SqlState; @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) @EnableOnNumOfInstances(min = 2) @EnableOnDatabaseEngineDeployment(DatabaseEngineDeployment.AURORA) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) @MakeSureFirstInstanceWriter public class ReadWriteSplittingTests { @@ -171,21 +177,9 @@ public void test_connectToReader_setReadOnlyTrueFalse() throws SQLException { // Assumes the writer is stored as the first instance and all other instances are readers. 
protected String getWrapperReaderInstanceUrl() { - return ConnectionStringHelper.getWrapperUrl( - TestEnvironment.getCurrent().getCurrentDriver(), - TestEnvironment.getCurrent() - .getInfo() - .getDatabaseInfo() - .getInstances() - .get(1) - .getEndpoint(), - TestEnvironment.getCurrent() - .getInfo() - .getDatabaseInfo() - .getInstances() - .get(1) - .getEndpointPort(), - TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); + TestInstanceInfo readerInstance = + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances().get(1); + return ConnectionStringHelper.getWrapperUrl(readerInstance); } @TestTemplate @@ -304,15 +298,13 @@ public void test_setReadOnlyTrueInTransaction() throws SQLException { @TestTemplate public void test_setReadOnlyTrue_oneHost() throws SQLException { - - // Use static host list with only one host final Properties props = getDefaultPropsNoPlugins(); PropertyDefinition.PLUGINS.set(props, "readWriteSplitting"); ConnectionStringHostListProvider.SINGLE_WRITER_CONNECTION_STRING.set(props, "true"); final String writerUrl = - TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances().get(0).getEndpoint(); + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances().get(0).getHost(); final String url = DriverHelper.getWrapperDriverProtocol() + writerUrl + "/" + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName() @@ -573,7 +565,7 @@ public void test_pooledConnection_reuseCachedConnection() throws SQLException { Properties props = getProps(); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); final Connection conn1; @@ -600,11 +592,14 @@ public void test_pooledConnection_reuseCachedConnection() throws SQLException { } } - protected static HikariConfig getHikariConfig(HostSpec hostSpec, Properties props) { - final HikariConfig config = new HikariConfig(); - config.setMaximumPoolSize(1); - config.setInitializationFailTimeout(20000); - return config; + protected HikariPoolConfigurator getHikariConfig(int maxPoolSize) { + return (hostSpec, props) -> { + final HikariConfig config = new HikariConfig(); + config.setMaximumPoolSize(maxPoolSize); + config.setInitializationFailTimeout(75000); + config.setKeepaliveTime(30000); + return config; + }; } @TestTemplate @@ -613,7 +608,7 @@ public void test_pooledConnectionFailover() throws SQLException, InterruptedExce Properties props = getPropsWithFailover(); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); final String initialWriterId; @@ -655,7 +650,7 @@ public void test_pooledConnectionFailoverWithClusterURL() throws SQLException, I Properties props = getPropsWithFailover(); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); final Connection initialWriterConn; @@ -689,7 +684,7 @@ public void test_pooledConnection_failoverFailed() throws SQLException { DriverHelper.setMonitoringSocketTimeout(props, 3, TimeUnit.SECONDS); final HikariPooledConnectionProvider provider = - new 
HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); final Connection initialWriterConn1; @@ -726,7 +721,7 @@ public void test_pooledConnection_failoverInTransaction() Properties props = getPropsWithFailover(); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); final String initialWriterId; @@ -783,7 +778,7 @@ public void test_pooledConnection_differentUsers() throws SQLException { wrongUserRightPasswordProps.setProperty(PropertyDefinition.USER.name, "bogus_user"); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(ReadWriteSplittingTests::getHikariConfig); + new HikariPooledConnectionProvider(getHikariConfig(1)); ConnectionProviderManager.setConnectionProvider(provider); try { @@ -826,4 +821,106 @@ public void test_pooledConnection_differentUsers() throws SQLException { } } } + + @TestTemplate + @EnableOnNumOfInstances(min = 5) + public void test_pooledConnection_leastConnectionsStrategy() throws SQLException { + final Properties props = getProps(); + ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); + + final List instances = + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances(); + final HikariPooledConnectionProvider provider = + new HikariPooledConnectionProvider(getHikariConfig(instances.size())); + ConnectionProviderManager.setConnectionProvider(provider); + + final List connections = new ArrayList<>(); + final List connectedReaderIDs = new ArrayList<>(); + try { + // Assume one writer and [size - 1] readers + for (int i = 0; i < instances.size() - 1; i++) { + final Connection conn = + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), props); + connections.add(conn); + conn.setReadOnly(true); + final String readerId = auroraUtil.queryInstanceId(conn); + + assertFalse(connectedReaderIDs.contains(readerId)); + connectedReaderIDs.add(readerId); + } + } finally { + for (Connection connection : connections) { + connection.close(); + } + ConnectionProviderManager.releaseResources(); + ConnectionProviderManager.resetProvider(); + } + } + + /** + * Tests custom pool mapping together with internal connection pools and the leastConnections + * host selection strategy. This test overloads one reader with connections and then verifies + * that new connections are sent to the other readers until their connection count equals that of + * the overloaded reader. + */ + @TestTemplate + @EnableOnNumOfInstances(min = 5) + public void test_pooledConnection_leastConnectionsWithPoolMapping() throws SQLException { + final Properties defaultProps = getProps(); + ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(defaultProps, "leastConnections"); + + final List propSets = new ArrayList<>(); + final int numOverloadedReaderConnections = 3; + for (int i = 0; i < numOverloadedReaderConnections; i++) { + final Properties props = PropertyUtils.copyProperties(defaultProps); + props.setProperty("arbitraryProp", "value" + i); + propSets.add(props); + } + + final List instances = + TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getInstances(); + // We will be testing all instances excluding the writer and overloaded reader. 
Each instance + // should be tested numOverloadedReaderConnections times to increase the pool connection count + // until it equals the connection count of the overloaded reader. + final int numTestConnections = (instances.size() - 2) * numOverloadedReaderConnections; + final HikariPooledConnectionProvider provider = + new HikariPooledConnectionProvider( + getHikariConfig(numTestConnections), + // Create a new pool for each instance-arbitraryProp combination + (hostSpec, connProps) -> hostSpec.getUrl() + connProps.getProperty("arbitraryProp") + ); + ConnectionProviderManager.setConnectionProvider(provider); + + final List connections = new ArrayList<>(); + try { + final TestInstanceInfo overloadedReader = instances.get(1); + final String overloadedReaderConnString = + ConnectionStringHelper.getWrapperUrl(overloadedReader); + for (int i = 0; i < numOverloadedReaderConnections; i++) { + // This should result in numOverloadedReaderConnections pools to the same reader instance, + // with each pool consisting of just one connection. The total connection count for the + // instance should be numOverloadedReaderConnections despite being spread across multiple + // pools. + final Connection conn = DriverManager.getConnection(overloadedReaderConnString, propSets.get(i)); + connections.add(conn); + } + + final String overloadedReaderId = overloadedReader.getInstanceId(); + for (int i = 0; i < numTestConnections; i++) { + final Connection conn = + DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), defaultProps); + connections.add(conn); + conn.setReadOnly(true); + final String readerId = auroraUtil.queryInstanceId(conn); + assertNotEquals(overloadedReaderId, readerId); + } + } finally { + for (Connection connection : connections) { + connection.close(); + } + + ConnectionProviderManager.releaseResources(); + ConnectionProviderManager.resetProvider(); + } + } } diff --git a/wrapper/src/test/java/integration/refactored/container/tests/SpringTests.java b/wrapper/src/test/java/integration/refactored/container/tests/SpringTests.java index 2f8b9e20b..307558b43 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/SpringTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/SpringTests.java @@ -36,7 +36,10 @@ @TestMethodOrder(MethodOrderer.MethodName.class) @ExtendWith(TestDriverProvider.class) -@DisableOnTestFeature({TestEnvironmentFeatures.PERFORMANCE, TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY}) +@DisableOnTestFeature({ + TestEnvironmentFeatures.PERFORMANCE, + TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY, + TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY}) public class SpringTests { @TestTemplate diff --git a/wrapper/src/test/java/integration/refactored/host/TestEnvironment.java b/wrapper/src/test/java/integration/refactored/host/TestEnvironment.java index c4751f980..4a03718eb 100644 --- a/wrapper/src/test/java/integration/refactored/host/TestEnvironment.java +++ b/wrapper/src/test/java/integration/refactored/host/TestEnvironment.java @@ -43,6 +43,7 @@ import org.testcontainers.containers.ToxiproxyContainer; import org.testcontainers.shaded.org.apache.commons.lang3.NotImplementedException; import org.testcontainers.utility.MountableFile; +import software.amazon.awssdk.services.rds.model.DBCluster; import software.amazon.jdbc.util.StringUtils; public class TestEnvironment implements AutoCloseable { @@ -280,8 +281,9 @@ private static void createAuroraDbCluster(TestEnvironment env, int numOfInstance LOGGER.finer( "Reuse existing 
cluster " + env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); - DatabaseEngine existingClusterDatabaseEngine = - env.auroraUtil.getClusterDatabaseEngine(env.auroraClusterName); + DBCluster clusterInfo = env.auroraUtil.getClusterInfo(env.auroraClusterName); + + DatabaseEngine existingClusterDatabaseEngine = env.auroraUtil.getClusterEngine(clusterInfo); if (existingClusterDatabaseEngine != env.info.getRequest().getDatabaseEngine()) { throw new RuntimeException( "Existing cluster is " @@ -291,6 +293,8 @@ private static void createAuroraDbCluster(TestEnvironment env, int numOfInstance + " is expected."); } + env.info.setDatabaseEngine(clusterInfo.engine()); + env.info.setDatabaseEngineVersion(clusterInfo.engineVersion()); instances.addAll(env.auroraUtil.getClusterInstanceIds(env.auroraClusterName)); } else { @@ -300,20 +304,25 @@ private static void createAuroraDbCluster(TestEnvironment env, int numOfInstance } try { + String engine = getAuroraDbEngine(env.info.getRequest()); + String engineVersion = getAuroraDbEngineVersion(env.info.getRequest()); + String instanceClass = getAuroraInstanceClass(env.info.getRequest()); + env.auroraClusterDomain = env.auroraUtil.createCluster( env.info.getDatabaseInfo().getUsername(), env.info.getDatabaseInfo().getPassword(), env.info.getDatabaseInfo().getDefaultDbName(), env.auroraClusterName, - getAuroraDbEngine(env.info.getRequest()), - getAuroraInstanceClass(env.info.getRequest()), - getAuroraDbEngineVersion(env.info.getRequest()), + engine, + instanceClass, + engineVersion, numOfInstances, instances); + env.info.setDatabaseEngine(engine); + env.info.setDatabaseEngineVersion(engineVersion); LOGGER.finer( "Created a new cluster " + env.auroraClusterName + ".cluster-" + env.auroraClusterDomain); - } catch (Exception e) { LOGGER.finer("Error creating a cluster " + env.auroraClusterName + ". 
" + e.getMessage()); @@ -472,7 +481,7 @@ private static void createProxyContainers(TestEnvironment env) { env.proxyContainers.add(container); ToxiproxyContainer.ContainerProxy proxy = - container.getProxy(instance.getEndpoint(), instance.getEndpointPort()); + container.getProxy(instance.getHost(), instance.getPort()); if (proxyPort != 0 && proxyPort != proxy.getOriginalProxyPort()) { throw new RuntimeException("DB cluster proxies should be on the same port."); @@ -526,7 +535,7 @@ private static void createProxyContainers(TestEnvironment env) { TestInstanceInfo proxyInstanceInfo = new TestInstanceInfo( instanceInfo.getInstanceId(), - instanceInfo.getEndpoint() + PROXIED_DOMAIN_NAME_SUFFIX, + instanceInfo.getHost() + PROXIED_DOMAIN_NAME_SUFFIX, proxyPort); env.info.getProxyDatabaseInfo().getInstances().add(proxyInstanceInfo); } diff --git a/wrapper/src/test/java/integration/refactored/host/TestEnvironmentProvider.java b/wrapper/src/test/java/integration/refactored/host/TestEnvironmentProvider.java index 4efdb0e68..9309a270a 100644 --- a/wrapper/src/test/java/integration/refactored/host/TestEnvironmentProvider.java +++ b/wrapper/src/test/java/integration/refactored/host/TestEnvironmentProvider.java @@ -73,6 +73,7 @@ public Stream provideTestTemplateInvocationContex final boolean noGraalVm = Boolean.parseBoolean(System.getProperty("test-no-graalvm", "false")); final boolean noOpenJdk = Boolean.parseBoolean(System.getProperty("test-no-openjdk", "false")); final boolean testHibernateOnly = Boolean.parseBoolean(System.getProperty("test-hibernate-only", "false")); + final boolean testAutoscalingOnly = Boolean.parseBoolean(System.getProperty("test-autoscaling-only", "false")); if (!noDocker) { if (!noMysqlEngine && !noOpenJdk) { @@ -89,7 +90,8 @@ public Stream provideTestTemplateInvocationContex noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, - testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null))); + testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); } if (!noPgEngine && !noOpenJdk) { resultContextList.add( @@ -105,7 +107,8 @@ public Stream provideTestTemplateInvocationContex noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, - testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null))); + testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); } if (!noMariadbEngine && !noOpenJdk) { resultContextList.add( @@ -184,7 +187,8 @@ public Stream provideTestTemplateInvocationContex noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, - testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null))); + testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null, + testAutoscalingOnly ? 
TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); } if (!noPgEngine && !noOpenJdk) { resultContextList.add( @@ -200,7 +204,8 @@ public Stream provideTestTemplateInvocationContex noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, - testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null))); + testHibernateOnly ? TestEnvironmentFeatures.RUN_HIBERNATE_TESTS_ONLY : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); } if (!noMariadbEngine && !noOpenJdk) { resultContextList.add( @@ -283,7 +288,8 @@ public Stream provideTestTemplateInvocationContex noPerformance ? null : TestEnvironmentFeatures.PERFORMANCE, noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null))); + noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); // Tests for HIKARI, IAM, SECRETS_MANAGER and PERFORMANCE are covered by // cluster configuration above, so it's safe to skip these tests for configurations below. @@ -301,7 +307,8 @@ public Stream provideTestTemplateInvocationContex TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null))); + noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); } if (!noPgEngine && !noOpenJdk) { resultContextList.add( @@ -321,7 +328,8 @@ public Stream provideTestTemplateInvocationContex noPerformance ? null : TestEnvironmentFeatures.PERFORMANCE, noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null))); + noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, + testAutoscalingOnly ? TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null))); // Tests for HIKARI, IAM, SECRETS_MANAGER and PERFORMANCE are covered by // cluster configuration above, so it's safe to skip these tests for configurations below. @@ -339,7 +347,8 @@ public Stream provideTestTemplateInvocationContex TestEnvironmentFeatures.AWS_CREDENTIALS_ENABLED, noMysqlDriver ? TestEnvironmentFeatures.SKIP_MYSQL_DRIVER_TESTS : null, noPgDriver ? TestEnvironmentFeatures.SKIP_PG_DRIVER_TESTS : null, - noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null))); + noMariadbDriver ? TestEnvironmentFeatures.SKIP_MARIADB_DRIVER_TESTS : null, + testAutoscalingOnly ? 
TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY : null)));
   }
 }
diff --git a/wrapper/src/test/java/integration/util/AuroraTestUtility.java b/wrapper/src/test/java/integration/util/AuroraTestUtility.java
index d3d58c601..4d1681b01 100644
--- a/wrapper/src/test/java/integration/util/AuroraTestUtility.java
+++ b/wrapper/src/test/java/integration/util/AuroraTestUtility.java
@@ -23,6 +23,7 @@
 import integration.refactored.DatabaseEngine;
 import integration.refactored.DriverHelper;
 import integration.refactored.TestDatabaseInfo;
+import integration.refactored.TestEnvironmentInfo;
 import integration.refactored.TestInstanceInfo;
 import integration.refactored.container.ConnectionStringHelper;
 import integration.refactored.container.TestEnvironment;
@@ -239,7 +240,6 @@ public String createCluster() throws InterruptedException {
         CreateDbInstanceRequest.builder()
             .dbClusterIdentifier(dbIdentifier)
             .dbInstanceIdentifier(instanceName)
-            .dbClusterIdentifier(dbIdentifier)
             .dbInstanceClass(dbInstanceClass)
             .engine(dbEngine)
             .engineVersion(dbEngineVersion)
@@ -282,6 +282,94 @@ public String createCluster() throws InterruptedException {
     return clusterDomainPrefix;
   }

+  /**
+   * Creates an RDS instance under the current cluster and waits until it is up.
+   *
+   * @param instanceId the desired instance ID of the new instance
+   * @return the instance info of the new instance
+   * @throws InterruptedException if the new instance is not available within 10 minutes
+   */
+  public TestInstanceInfo createInstance(String instanceId) throws InterruptedException {
+    final Tag testRunnerTag = Tag.builder().key("env").value("test-runner").build();
+    final TestEnvironmentInfo info = TestEnvironment.getCurrent().getInfo();
+
+    rdsClient.createDBInstance(
+        CreateDbInstanceRequest.builder()
+            .dbClusterIdentifier(info.getAuroraClusterName())
+            .dbInstanceIdentifier(instanceId)
+            .dbInstanceClass(dbInstanceClass)
+            .engine(info.getDatabaseEngine())
+            .engineVersion(info.getDatabaseEngineVersion())
+            .publiclyAccessible(true)
+            .tags(testRunnerTag)
+            .build());
+
+    // Wait for the instance to become available
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbInstancesResponse> waiterResponse =
+        waiter.waitUntilDBInstanceAvailable(
+            (requestBuilder) ->
+                requestBuilder.filters(
+                    Filter.builder().name("db-instance-id").values(instanceId).build()),
+            (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(10)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      deleteCluster();
+      throw new InterruptedException(
+          "Instance creation timeout for " + instanceId
+              + ". The instance was not available within 10 minutes");
+    }
+
+    final DescribeDbInstancesResponse dbInstancesResult =
+        rdsClient.describeDBInstances(
+            (builder) ->
+                builder.filters(
+                    Filter.builder().name("db-instance-id").values(instanceId).build()));
+
+    if (dbInstancesResult.dbInstances().size() != 1) {
+      throw new RuntimeException(
+          "The describeDBInstances request for newly created instance " + instanceId
+              + " returned an unexpected number of instances: "
+              + dbInstancesResult.dbInstances().size());
+    }
+
+    DBInstance instance = dbInstancesResult.dbInstances().get(0);
+    TestInstanceInfo instanceInfo = new TestInstanceInfo(
+        instance.dbInstanceIdentifier(),
+        instance.endpoint().address(),
+        instance.endpoint().port());
+    this.instances.add(instanceInfo);
+    return instanceInfo;
+  }
+
+  /**
+   * Deletes an RDS instance.
+   *
+   * @param instanceToDelete the info for the instance to delete
+   * @throws InterruptedException if the instance has not been deleted within 10 minutes
+   */
+  public void deleteInstance(TestInstanceInfo instanceToDelete) throws InterruptedException {
+    rdsClient.deleteDBInstance(
+        DeleteDbInstanceRequest.builder()
+            .dbInstanceIdentifier(instanceToDelete.getInstanceId())
+            .skipFinalSnapshot(true)
+            .build());
+    this.instances.remove(instanceToDelete);
+
+    final RdsWaiter waiter = rdsClient.waiter();
+    WaiterResponse<DescribeDbInstancesResponse> waiterResponse = waiter.waitUntilDBInstanceDeleted(
+        (requestBuilder) -> requestBuilder.filters(
+            Filter.builder().name("db-instance-id").values(instanceToDelete.getInstanceId())
+                .build()),
+        (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(10)));
+
+    if (waiterResponse.matched().exception().isPresent()) {
+      throw new InterruptedException(
+          "Instance deletion timeout for " + instanceToDelete.getInstanceId()
+              + ". The instance was not deleted within 10 minutes");
+    }
+  }
+
   /**
    * Gets public IP.
    *
@@ -408,7 +496,7 @@ public boolean doesClusterExist(final String clusterId) {
     return true;
   }

-  public DatabaseEngine getClusterDatabaseEngine(final String clusterId) {
+  public DBCluster getClusterInfo(final String clusterId) {
     final DescribeDbClustersRequest request =
         DescribeDbClustersRequest.builder().dbClusterIdentifier(clusterId).build();
     final DescribeDbClustersResponse response = rdsClient.describeDBClusters(request);
@@ -416,13 +504,17 @@ public DatabaseEngine getClusterDatabaseEngine(final String clusterId) {
       throw new RuntimeException("Cluster " + clusterId + " not found.");
     }

-    switch (response.dbClusters().get(0).engine()) {
+    return response.dbClusters().get(0);
+  }
+
+  public DatabaseEngine getClusterEngine(final DBCluster cluster) {
+    switch (cluster.engine()) {
       case "aurora-postgresql":
         return DatabaseEngine.PG;
       case "aurora-mysql":
         return DatabaseEngine.MYSQL;
       default:
-        throw new UnsupportedOperationException(response.dbClusters().get(0).engine());
+        throw new UnsupportedOperationException(cluster.engine());
     }
   }
@@ -549,8 +641,8 @@ protected void makeSureInstancesUp(List<TestInstanceInfo> instances, boolean finalCheck)
       try (final Connection conn =
           DriverManager.getConnection(
               ConnectionStringHelper.getUrl(
-                  instanceInfo.getEndpoint(),
-                  instanceInfo.getEndpointPort(),
+                  instanceInfo.getHost(),
+                  instanceInfo.getPort(),
                   TestEnvironment.getCurrent()
                       .getInfo()
                       .getDatabaseInfo()
@@ -630,11 +722,11 @@ public void failoverClusterToATargetAndWaitUntilWriterChanged(
     // Failover has finished; wait for DNS to be updated so the cluster endpoint resolves to the correct writer instance.
String clusterIp = hostToIP(clusterEndpoint); - String targetWriterIp = hostToIP(dbInfo.getInstance(targetWriterId).getEndpoint()); + String targetWriterIp = hostToIP(dbInfo.getInstance(targetWriterId).getHost()); while (!clusterIp.equals(targetWriterIp)) { TimeUnit.SECONDS.sleep(1); clusterIp = hostToIP(clusterEndpoint); - targetWriterIp = hostToIP(dbInfo.getInstance(targetWriterId).getEndpoint()); + targetWriterIp = hostToIP(dbInfo.getInstance(targetWriterId).getHost()); } // Wait for target instance to be verified as a writer diff --git a/wrapper/src/test/java/integration/util/ContainerHelper.java b/wrapper/src/test/java/integration/util/ContainerHelper.java index 2f77eb2bb..62af047e9 100644 --- a/wrapper/src/test/java/integration/util/ContainerHelper.java +++ b/wrapper/src/test/java/integration/util/ContainerHelper.java @@ -306,7 +306,7 @@ public ToxiproxyContainer createProxyContainer( .withNetwork(network) .withNetworkAliases( "proxy-instance-" + instance.getInstanceId(), - instance.getEndpoint() + proxyDomainNameSuffix); + instance.getHost() + proxyDomainNameSuffix); } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index b5787e18a..cd2a83cd5 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -73,7 +73,7 @@ void testConnectWithDefaultMapping() throws SQLException { props.setProperty(PropertyDefinition.USER.name, "username"); props.setProperty(PropertyDefinition.PASSWORD.name, "password"); try (Connection conn = provider.connect( - "protocol", mockDialect, mockHostSpec, props)) { + "protocol", mockDialect, mockHostSpec, props, true)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set hosts = provider.getHosts(); @@ -98,10 +98,10 @@ void testConnectWithCustomMapping() throws SQLException { props.setProperty(PropertyDefinition.USER.name, "username"); props.setProperty(PropertyDefinition.PASSWORD.name, "password"); try (Connection conn = provider.connect( - "protocol", mockDialect, mockHostSpec, props)) { + "protocol", mockDialect, mockHostSpec, props, true)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); - final Set hosts = provider.getHosts(); + final Set hosts = provider.getKeys(); assertEquals(expected, hosts); } From e7370bbe09198962db7565cb24dc7dd82a700c4a Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Thu, 11 May 2023 11:49:22 -0700 Subject: [PATCH 02/21] Add MakeSureFirstInstanceWriter --- .../amazon/jdbc/HikariPooledConnectionProvider.java | 11 ----------- .../refactored/container/tests/AutoscalingTests.java | 2 ++ 2 files changed, 2 insertions(+), 11 deletions(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 3940f511e..597c7a457 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -205,17 +205,6 @@ public Connection connect( databasePools.remove(hostSpec.getUrl()); } - if (isInitialConnection) { - String errorMessage = latestConnectException == null - ? 
Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSource", - new Object[]{hostSpec.getUrl()}) - : Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSourceWithCause", - new Object[] {hostSpec.getUrl(), latestConnectException.getMessage()}); - throw new SQLException(errorMessage); - } else { - // try to connect to another instance - } - String errorMessage = latestConnectException == null ? Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSource", new Object[]{hostSpec.getUrl()}) diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java index 008e8d5e4..290574d84 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -34,6 +34,7 @@ import integration.refactored.container.condition.EnableOnDatabaseEngineDeployment; import integration.refactored.container.condition.EnableOnNumOfInstances; import integration.refactored.container.condition.EnableOnTestFeature; +import integration.refactored.container.condition.MakeSureFirstInstanceWriter; import integration.util.AuroraTestUtility; import java.sql.Connection; import java.sql.DriverManager; @@ -60,6 +61,7 @@ @EnableOnTestFeature(TestEnvironmentFeatures.RUN_AUTOSCALING_TESTS_ONLY) @EnableOnDatabaseEngineDeployment({DatabaseEngineDeployment.AURORA}) @EnableOnNumOfInstances(min = 5) +@MakeSureFirstInstanceWriter public class AutoscalingTests { protected static final AuroraTestUtility auroraUtil = new AuroraTestUtility(TestEnvironment.getCurrent().getInfo().getAuroraRegion()); From ee0d392d0a93bb7eda21f58588f0843892df1291 Mon Sep 17 00:00:00 2001 From: sergiyv-bitquill Date: Thu, 11 May 2023 16:45:52 -0700 Subject: [PATCH 03/21] use CacheMap to store connection pools; implement computeIfAbsent() for CacheMap; provide dispose callback function to properly close DataSource; --- .../amazon/jdbc/ConnectionProvider.java | 9 - .../jdbc/ConnectionProviderManager.java | 16 -- .../jdbc/DataSourceConnectionProvider.java | 5 - .../amazon/jdbc/DriverConnectionProvider.java | 5 - .../jdbc/HikariPooledConnectionProvider.java | 185 ++++++++---------- .../jdbc/plugin/DefaultConnectionPlugin.java | 1 - .../software/amazon/jdbc/util/CacheMap.java | 73 ++++++- .../HikariPooledConnectionProviderTest.java | 17 +- 8 files changed, 162 insertions(+), 149 deletions(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 23726b079..897ba69fa 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -97,13 +97,4 @@ Connection connect( */ Connection connect(@NonNull String url, @NonNull Properties props) throws SQLException; // TODO: this method is only called by tests/benchmarks and can likely be deprecated - - /** - * Notify this {@link ConnectionProvider} of any changes detected with any of the database - * instances. 
- * - * @param changes a map from a given database instance's URL to the detected changes for that - * instance - */ - void notifyNodeListChanged(Map> changes); } diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java index e9cd39083..5c3a7dc5c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java @@ -166,22 +166,6 @@ public HostSpec getHostSpecByStrategy(List hosts, HostRole role, Strin return host; } - public void notifyNodeListChanged(final Map> changes) { - if (connProvider != null) { - connProviderLock.writeLock().lock(); - try { - if (connProvider != null) { - connProvider.notifyNodeListChanged(changes); - } - } catch (UnsupportedOperationException e) { - // The custom provider does not support the provided strategy, ignore it and try with the default provider. - } finally { - connProviderLock.writeLock().unlock(); - } - } - defaultProvider.notifyNodeListChanged(changes); - } - /** * Clears the non-default {@link ConnectionProvider} if it has been set. The default * ConnectionProvider will be used if the non-default ConnectionProvider has not been set or has diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 0f7435809..814d6f12c 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -224,9 +224,4 @@ private DataSource createDataSource() throws SQLException { throw new SQLException(instEx.getMessage(), SqlState.UNKNOWN_STATE.getState(), instEx); } } - - @Override - public void notifyNodeListChanged(Map> changes) { - // Do nothing - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java index 29291345a..065db297a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java @@ -148,9 +148,4 @@ public Connection connect(@NonNull final String url, @NonNull final Properties p LOGGER.finest(() -> "Connecting to " + url); return this.driver.connect(url, props); } - - @Override - public void notifyNodeListChanged(Map> changes) { - // Do nothing - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 597c7a457..d866db73a 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -21,20 +21,18 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; -import java.util.EnumSet; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; -import java.util.Map; +import java.util.Map.Entry; import java.util.Properties; import java.util.Set; import java.util.StringJoiner; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; import java.util.logging.Logger; import java.util.stream.Collectors; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.cleanup.CanReleaseResources; 
import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.HikariCPSQLException; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; @@ -47,12 +45,15 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final Logger LOGGER = Logger.getLogger(HikariPooledConnectionProvider.class.getName()); + private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; + private static final RdsUtils rdsUtils = new RdsUtils(); // 2-layer map that helps us keep track of how many pools are associated with each database // instance. The first layer maps from an instance URL to the 2nd map. The 2nd layer maps from // the key returned from the poolMapping function to the database pool. - private static final Map> databasePools = new ConcurrentHashMap<>(); + private static final CacheMap databasePools = new CacheMap<>(); + private static final long poolExpirationNanos = TimeUnit.MINUTES.toNanos(30); private final HikariPoolConfigurator poolConfigurator; private final HikariPoolMapping poolMapping; protected int retries = 10; @@ -73,7 +74,7 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, * HikariConfig. */ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator) { - this(hikariPoolConfigurator, (hostSpec, properties) -> hostSpec.getUrl()); + this(hikariPoolConfigurator, null); } /** @@ -110,14 +111,14 @@ public boolean acceptsUrl( @Override public boolean acceptsStrategy(@NonNull HostRole role, @NonNull String strategy) { - return "leastConnections".equals(strategy); + return LEAST_CONNECTIONS_STRATEGY.equals(strategy); } @Override public HostSpec getHostSpecByStrategy( @NonNull List hosts, @NonNull HostRole role, @NonNull String strategy) throws SQLException { - if (!"leastConnections".equals(strategy)) { + if (!LEAST_CONNECTIONS_STRATEGY.equals(strategy)) { throw new UnsupportedOperationException( Messages.get( "ConnectionProvider.unsupportedHostSpecSelectorStrategy", @@ -127,29 +128,25 @@ public HostSpec getHostSpecByStrategy( // Remove hosts with the wrong role List eligibleHosts = hosts.stream() .filter(hostSpec -> role.equals(hostSpec.getRole())) + .sorted((hostSpec1, hostSpec2) -> + getNumConnections(hostSpec1) - getNumConnections(hostSpec2)) .collect(Collectors.toList()); + if (eligibleHosts.size() == 0) { throw new SQLException(Messages.get("HostSelector.noHostsMatchingRole", new Object[]{role})); } - if (HostRole.WRITER.equals(role)) { - // Assuming there is only one writer, there are no other valid hosts to return, so we do - // not have to sort by least connections. 
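For reference, the strategy implemented by the new sorted() call above is enabled from application code roughly as follows. This is a condensed sketch assembled from the integration tests in this patch; the single-connection HikariConfig is illustrative only:

    final Properties props = new Properties();
    PropertyDefinition.PLUGINS.set(props, "readWriteSplitting");
    ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections");
    ConnectionProviderManager.setConnectionProvider(
        new HikariPooledConnectionProvider((hostSpec, poolProps) -> new HikariConfig()));
    try (Connection conn = DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), props)) {
      // setReadOnly(true) routes to the reader with the fewest active pooled connections
      conn.setReadOnly(true);
    }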
- return eligibleHosts.get(0); - } - - eligibleHosts.sort((hostSpec1, hostSpec2) -> - getNumConnections(hostSpec1) - getNumConnections(hostSpec2)); return eligibleHosts.get(0); } private int getNumConnections(HostSpec hostSpec) { - Map hostPools = databasePools.get(hostSpec.getUrl()); int numConnections = 0; - if (hostPools != null) { - for (HikariDataSource ds : hostPools.values()) { - numConnections += ds.getHikariPoolMXBean().getActiveConnections(); + final String url = hostSpec.getUrl(); + for (Entry entry : databasePools.getEntries().entrySet()) { + if (!url.equals(entry.getKey().url)) { + continue; } + numConnections += entry.getValue().getHikariPoolMXBean().getActiveConnections(); } return numConnections; } @@ -162,55 +159,17 @@ public Connection connect( @NonNull Properties props, boolean isInitialConnection) throws SQLException { - // TODO: do we need to lock databasePools when we are interacting with it? Even though it is - // a ConcurrentHashMap, it contains another ConcurrentHashMap. Could threads interleave - // between getting the inner hashmap and updating/creating it? - final Map hostPools = databasePools.computeIfAbsent( - hostSpec.getUrl(), - url -> new HashMap<>() - ); - final String poolKey = getPoolKey(hostSpec, props); - final HikariDataSource ds = hostPools.computeIfAbsent( - poolKey, - lambdaPoolKey -> createHikariDataSource(protocol, hostSpec, props) + final HikariDataSource ds = databasePools.computeIfAbsent( + new PoolKey(hostSpec.getUrl(), getPoolKey(hostSpec, props)), + (lambdaPoolKey) -> createHikariDataSource(protocol, hostSpec, props), + poolExpirationNanos, + (hikariDataSource) -> hikariDataSource.close() ); - ds.setPassword(props.getProperty(PropertyDefinition.PASSWORD.name)); - - Connection conn = null; - SQLException latestConnectException = null; - for (int i = 0; i < retries + 1; i++) { - try { - conn = ds.getConnection(); - if (conn != null && !conn.isValid(3)) { - ds.evictConnection(conn); - } - break; - } catch (SQLException e) { - latestConnectException = e; - if (conn != null) { - ds.evictConnection(conn); - } - } - } - if (conn != null) { - return conn; - } - - // Data source is not working - ds.close(); - hostPools.remove(poolKey); - if (hostPools.size() == 0) { - databasePools.remove(hostSpec.getUrl()); - } + ds.setPassword(props.getProperty(PropertyDefinition.PASSWORD.name)); - String errorMessage = latestConnectException == null - ? Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSource", - new Object[]{hostSpec.getUrl()}) - : Messages.get("HikariPooledConnectionProvider.errorConnectingWithDataSourceWithCause", - new Object[] {hostSpec.getUrl(), latestConnectException.getMessage()}); - throw new SQLException(errorMessage); + return ds.getConnection(); } @Override @@ -222,41 +181,21 @@ public Connection connect(@NonNull String url, @NonNull Properties props) throws // The pool key should always be retrieved using this method, because the username // must always be included to avoid sharing privileged connections with other users. 
private String getPoolKey(HostSpec hostSpec, Properties props) { - final StringBuilder sb = new StringBuilder(); - sb.append(poolMapping.getKey(hostSpec, props)) - .append(props.getProperty(PropertyDefinition.USER.name)); - return sb.toString(); - } - - @Override - public void notifyNodeListChanged(Map> changes) { - for (Map.Entry> hostChangesEntry : changes.entrySet()) { - final EnumSet hostChanges = hostChangesEntry.getValue(); - // TODO: do any other node changes need to be checked and processed? - if (!hostChanges.contains(NodeChangeOptions.NODE_DELETED)) { - continue; - } - - final String hostUrl = hostChangesEntry.getKey(); - final Map hostPools = databasePools.get(hostUrl); - if (hostPools != null) { - for (Map.Entry poolEntry : hostPools.entrySet()) { - final HikariDataSource ds = poolEntry.getValue(); - if (ds != null) { - ds.close(); - } - } - hostPools.clear(); - } - databasePools.remove(hostUrl); + if (this.poolMapping != null) { + return this.poolMapping.getKey(hostSpec, props); } + + // Otherwise use default map key + String user = props.getProperty(PropertyDefinition.USER.name); + return user == null ? "" : user; } @Override public void releaseResources() { - databasePools.forEach((url, hostPools) -> { - hostPools.forEach((poolKey, ds) -> ds.close()); - hostPools.clear(); + databasePools.getEntries().forEach((poolKey, pool) -> { + if (!pool.isClosed()) { + pool.close(); + } }); databasePools.clear(); } @@ -318,7 +257,10 @@ public int getHostCount() { * @return a set containing every host URL for which there are one or more connection pool(s). */ public Set getHosts() { - return Collections.unmodifiableSet(databasePools.keySet()); + return Collections.unmodifiableSet( + databasePools.getEntries().keySet().stream() + .map(poolKey -> poolKey.url) + .collect(Collectors.toSet())); } /** @@ -326,12 +268,8 @@ public Set getHosts() { * * @return a set containing every key associated with an active connection pool */ - public Set getKeys() { - Set keys = new HashSet<>(); - for (Map pool : databasePools.values()) { - keys.addAll(pool.keySet()); - } - return keys; + public Set getKeys() { + return databasePools.getEntries().keySet(); } /** @@ -340,7 +278,7 @@ public Set getKeys() { public void logConnections() { LOGGER.finest(() -> { final StringBuilder builder = new StringBuilder(); - databasePools.forEach((key, dataSource) -> { + databasePools.getEntries().forEach((key, dataSource) -> { builder.append("\t[ "); builder.append(key).append(":"); builder.append("\n\t {"); @@ -357,4 +295,43 @@ HikariDataSource createHikariDataSource(String protocol, HostSpec hostSpec, Prop configurePool(config, protocol, hostSpec, props); return new HikariDataSource(config); } + + public static class PoolKey { + private final @NonNull String url; + private final @NonNull String extraKey; + + public PoolKey(final @NonNull String url, final @NonNull String extraKey) { + this.url = url; + this.extraKey = extraKey; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((url == null) ? 0 : url.hashCode()) + ((extraKey == null) ? 
0 : extraKey.hashCode()); + return result; + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final PoolKey other = (PoolKey) obj; + return this.url.equals(other.url) && this.extraKey.equals(other.extraKey); + } + + @Override + public String toString() { + return "PoolKey [url=" + url + ", extraKey=" + extraKey + "]"; + } + + } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index 7704a9128..aed2722b4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -226,7 +226,6 @@ public OldConnectionSuggestedAction notifyConnectionChanged( @Override public void notifyNodeListChanged(final Map> changes) { - this.connProviderManager.notifyNodeListChanged(changes); } List parseMultiStatementQueries(String query) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index eefe85567..141ad509b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -18,24 +18,37 @@ import java.util.HashMap; import java.util.Map; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; +import java.util.function.Supplier; public class CacheMap { private final Map> cache = new ConcurrentHashMap<>(); - private final long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); + private long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); private final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos); public CacheMap() { } + public CacheMap(final long cleanupIntervalNanos) { + this.cleanupIntervalNanos = cleanupIntervalNanos; + } + public V get(final K key) { final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv); return cacheItem == null ? null : cacheItem.item; } + public V getWithExtendExpiration(final K key, final long itemExpirationNano) { + final CacheItem cacheItem = cache.computeIfPresent(key, + (kk, vv) -> vv.isExpired() ? null : vv.withExtendExpiration(itemExpirationNano)); + return cacheItem == null ? 
null : cacheItem.item; + } + public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { final CacheItem cacheItem = cache.compute(key, (kk, vv) -> (vv == null || vv.isExpired()) @@ -54,6 +67,29 @@ public void putIfAbsent(final K key, final V item, final long itemExpirationNano cleanUp(); } + public V computeIfAbsent( + final K key, + Function mappingFunction, + final long itemExpirationNano) { + return computeIfAbsent(key, mappingFunction, itemExpirationNano, null); + } + + public V computeIfAbsent( + final K key, + Function mappingFunction, + final long itemExpirationNano, + final ItemDisposeFunc itemDisposeFunc) { + + final CacheItem cacheItem = cache.computeIfAbsent( + key, + k -> new CacheItem<>( + mappingFunction.apply(k), + System.nanoTime() + itemExpirationNano, + itemDisposeFunc)); + cleanUp(); + return cacheItem.withExtendExpiration(itemExpirationNano).item; + } + public void remove(final K key) { cache.remove(key); cleanUp(); @@ -81,24 +117,55 @@ private void cleanUp() { cache.forEach((key, value) -> { if (value == null || value.isExpired()) { cache.remove(key); + if (value != null) { + value.dispose(); + } } }); } } + public interface ItemDisposeFunc { + void dispose(V item); + } + private static class CacheItem { - final V item; - final long expirationTime; + private final V item; + private long expirationTime; + + final ItemDisposeFunc disposeFunc; public CacheItem(final V item, final long expirationTime) { + this(item, expirationTime, null); + } + + public CacheItem(final V item, final long expirationTime, final ItemDisposeFunc disposeFunc) { this.item = item; this.expirationTime = expirationTime; + this.disposeFunc = disposeFunc; } boolean isExpired() { return System.nanoTime() > expirationTime; } + public CacheItem withExtendExpiration(final long itemExpirationNano) { + this.expirationTime = System.nanoTime() + itemExpirationNano; + return this; + } + + public void dispose() { + if (this.disposeFunc == null) { + return; + } + + try { + this.disposeFunc.dispose(this.item); + } catch (Exception ex) { + // ignore + } + } + @Override public int hashCode() { final int prime = 31; diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index cd2a83cd5..75728ed47 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -35,6 +35,7 @@ import org.junit.jupiter.api.Test; import org.mockito.Mock; import org.mockito.MockitoAnnotations; +import software.amazon.jdbc.HikariPooledConnectionProvider.PoolKey; import software.amazon.jdbc.dialect.Dialect; class HikariPooledConnectionProviderTest { @@ -45,7 +46,6 @@ class HikariPooledConnectionProviderTest { @Mock Dialect mockDialect; private AutoCloseable closeable; - private static final Properties emptyProperties = new Properties(); @BeforeEach void init() throws SQLException { @@ -62,7 +62,9 @@ void tearDown() throws Exception { @Test void testConnectWithDefaultMapping() throws SQLException { when(mockHostSpec.getUrl()).thenReturn("url"); - final Set expected = new HashSet<>(Collections.singletonList("urlusername")); + final Set expectedUrls = new HashSet<>(Collections.singletonList("url")); + final Set expectedKeys = new HashSet<>( + Collections.singletonList(new PoolKey("url", "username"))); final HikariPooledConnectionProvider provider = spy(new 
HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); @@ -77,7 +79,9 @@ void testConnectWithDefaultMapping() throws SQLException { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set hosts = provider.getHosts(); - assertEquals(expected, hosts); + assertEquals(expectedUrls, hosts); + final Set keys = provider.getKeys(); + assertEquals(expectedKeys, keys); } provider.releaseResources(); @@ -86,7 +90,8 @@ void testConnectWithDefaultMapping() throws SQLException { @Test void testConnectWithCustomMapping() throws SQLException { when(mockHostSpec.getUrl()).thenReturn("url"); - final Set expected = new HashSet<>(Collections.singletonList("url+someUniqueKeyusername")); + final Set expectedKeys = new HashSet<>( + Collections.singletonList(new PoolKey("url", "url+someUniqueKey"))); final HikariPooledConnectionProvider provider = spy(new HikariPooledConnectionProvider( (hostSpec, properties) -> mockConfig, @@ -101,8 +106,8 @@ void testConnectWithCustomMapping() throws SQLException { "protocol", mockDialect, mockHostSpec, props, true)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); - final Set hosts = provider.getKeys(); - assertEquals(expected, hosts); + final Set keys = provider.getKeys(); + assertEquals(expectedKeys, keys); } provider.releaseResources(); From 64c1f8c72d08f07642043fe2b530c9fcbb8aaab4 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Mon, 15 May 2023 09:40:37 -0700 Subject: [PATCH 04/21] wip - resolve CacheMap enhancements --- .../UsingTheReadWriteSplittingPlugin.md | 2 +- .../ReadWriteSplittingPostgresExample.java | 3 +- .../jdbc/HikariPooledConnectionProvider.java | 25 +++++++++----- .../software/amazon/jdbc/util/CacheMap.java | 34 ++++++++++--------- 4 files changed, 38 insertions(+), 26 deletions(-) diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md index 299212061..479fad5ba 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md @@ -39,7 +39,7 @@ The wrapper driver currently uses [Hikari](https://github.com/brettwooldridge/Hi - username - password -You can optionally pass in a `HikariPoolMapping` function as a second parameter to the `HikariPooledConnectionProvider`. This allows you to decide when new connection pools should be created by defining what is included in the pool map key. A new pool will be created each time a new connection is requested with a unique key. By default, a new pool will be created for each unique instance-user combination. If you would like to define a different key system, you should pass in a `HikariPoolMapping` function defining this logic. Note that the user will always be automatically included in the key for security reasons. Please see [ReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java) for an example of how to configure the pool map key. +You can optionally pass in a `HikariPoolMapping` function as a second parameter to the `HikariPooledConnectionProvider`. This allows you to decide when new connection pools should be created by defining what is included in the pool map key. A new pool will be created each time a new connection is requested with a unique key. 
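As a concrete illustration of the keying behavior described here, below is a minimal sketch of wiring a custom `HikariPoolMapping` into the provider. It assumes the `(HostSpec, Properties) -> String` functional shape used throughout this patch; the key composition itself is only an example, not a recommended scheme.

```java
import com.zaxxer.hikari.HikariConfig;
import software.amazon.jdbc.ConnectionProviderManager;
import software.amazon.jdbc.HikariPooledConnectionProvider;
import software.amazon.jdbc.PropertyDefinition;

public final class CustomPoolMappingSetup {
  public static void main(String[] args) {
    // Every distinct string returned by the mapping lambda gets its own pool;
    // repeated keys reuse the pool already cached for them.
    final HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider(
        (hostSpec, props) -> new HikariConfig(), // pool configuration, simplified here
        (hostSpec, props) ->
            hostSpec.getUrl() + props.getProperty(PropertyDefinition.USER.name));
    ConnectionProviderManager.setConnectionProvider(provider);
  }
}
```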
By default, a new pool will be created for each unique instance-user combination. If you would like to define a different key system, you should pass in a `HikariPoolMapping` function defining this logic. Please see [ReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java) for an example of how to configure the pool map key. 2. Call `ConnectionProviderManager.setConnectionProvider`, passing in the `HikariPooledConnectionProvider` you created in step 1. diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java index 99de4bf2d..a846534d3 100644 --- a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java +++ b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java @@ -167,7 +167,8 @@ private static String getPoolKey(HostSpec hostSpec, Properties props) { // Include the URL and somePropertyValue in the connection pool key so that a new connection // pool will be opened for each different instance-user-somePropertyValue combination. // (Note that the user will automatically be added to the key). + final String user = props.getProperty(PropertyDefinition.USER.name); final String somePropertyValue = props.getProperty("somePropertyValue"); - return hostSpec.getUrl() + somePropertyValue; + return hostSpec.getUrl() + user + somePropertyValue; } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index d866db73a..1a721e30e 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -48,12 +48,8 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; private static final RdsUtils rdsUtils = new RdsUtils(); - - // 2-layer map that helps us keep track of how many pools are associated with each database - // instance. The first layer maps from an instance URL to the 2nd map. The 2nd layer maps from - // the key returned from the poolMapping function to the database pool. private static final CacheMap databasePools = new CacheMap<>(); - private static final long poolExpirationNanos = TimeUnit.MINUTES.toNanos(30); + private static long poolExpirationNanos = TimeUnit.MINUTES.toNanos(30); private final HikariPoolConfigurator poolConfigurator; private final HikariPoolMapping poolMapping; protected int retries = 10; @@ -74,7 +70,15 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, * HikariConfig. 
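For reference, the example's updated key function reads as follows when assembled in one place; this sketch assumes `somePropertyValue` is the illustrative property from the example file. Since the driver no longer appends the user automatically, including it in the key keeps pools from being shared across credentials.

```java
import java.util.Properties;

import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.PropertyDefinition;

final class PoolKeySketch {
  // URL + user + somePropertyValue: one pool per distinct combination.
  static String getPoolKey(HostSpec hostSpec, Properties props) {
    final String user = props.getProperty(PropertyDefinition.USER.name);
    final String somePropertyValue = props.getProperty("somePropertyValue");
    return hostSpec.getUrl() + user + somePropertyValue;
  }
}
```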
*/ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator) { - this(hikariPoolConfigurator, null); + this(hikariPoolConfigurator, null, poolExpirationNanos); + } + + public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping poolMapping) { + this(hikariPoolConfigurator, poolMapping, poolExpirationNanos); + } + + public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator, long poolExpirationNanos) { + this(hikariPoolConfigurator, null, poolExpirationNanos); } /** @@ -97,9 +101,11 @@ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigura */ public HikariPooledConnectionProvider( HikariPoolConfigurator hikariPoolConfigurator, - HikariPoolMapping mapping) { + HikariPoolMapping mapping, + long poolExpirationNanos) { this.poolConfigurator = hikariPoolConfigurator; this.poolMapping = mapping; + this.poolExpirationNanos = poolExpirationNanos; } @Override @@ -164,7 +170,10 @@ public Connection connect( new PoolKey(hostSpec.getUrl(), getPoolKey(hostSpec, props)), (lambdaPoolKey) -> createHikariDataSource(protocol, hostSpec, props), poolExpirationNanos, - (hikariDataSource) -> hikariDataSource.close() + (hikariDataSource) -> { + int numConnections = hikariDataSource.getHikariPoolMXBean().getActiveConnections(); + return numConnections == 0; + } ); ds.setPassword(props.getProperty(PropertyDefinition.PASSWORD.name)); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index 141ad509b..c73562b97 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -18,12 +18,10 @@ import java.util.HashMap; import java.util.Map; -import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.function.Function; -import java.util.function.Supplier; public class CacheMap { @@ -78,16 +76,16 @@ public V computeIfAbsent( final K key, Function mappingFunction, final long itemExpirationNano, - final ItemDisposeFunc itemDisposeFunc) { + final ItemExpiryFunc itemExpiryFunc) { final CacheItem cacheItem = cache.computeIfAbsent( key, k -> new CacheItem<>( mappingFunction.apply(k), System.nanoTime() + itemExpirationNano, - itemDisposeFunc)); + itemExpiryFunc)); cleanUp(); - return cacheItem.withExtendExpiration(itemExpirationNano).item; + return cacheItem.isExpired() ? 
null : cacheItem.withExtendExpiration(itemExpirationNano).item; } public void remove(final K key) { @@ -116,33 +114,36 @@ private void cleanUp() { this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); cache.forEach((key, value) -> { if (value == null || value.isExpired()) { - cache.remove(key); if (value != null) { - value.dispose(); + if (value.onExpiry()) { + cache.remove(key); + } + } else { + cache.remove(key); } } }); } } - public interface ItemDisposeFunc { - void dispose(V item); + public interface ItemExpiryFunc { + boolean onExpiry(V item); } private static class CacheItem { private final V item; private long expirationTime; - final ItemDisposeFunc disposeFunc; + final ItemExpiryFunc onExpiryFunc; public CacheItem(final V item, final long expirationTime) { this(item, expirationTime, null); } - public CacheItem(final V item, final long expirationTime, final ItemDisposeFunc disposeFunc) { + public CacheItem(final V item, final long expirationTime, final ItemExpiryFunc onExpiryFunc) { this.item = item; this.expirationTime = expirationTime; - this.disposeFunc = disposeFunc; + this.onExpiryFunc = onExpiryFunc; } boolean isExpired() { @@ -154,15 +155,16 @@ public CacheItem withExtendExpiration(final long itemExpirationNano) { return this; } - public void dispose() { - if (this.disposeFunc == null) { - return; + public boolean onExpiry() { + if (this.onExpiryFunc == null) { + return true; } try { - this.disposeFunc.dispose(this.item); + return this.onExpiryFunc.onExpiry(this.item); } catch (Exception ex) { // ignore + return true; } } From 49c084b97c377cea9bd3d943dfecd3b4188cc162 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Mon, 15 May 2023 13:40:45 -0700 Subject: [PATCH 05/21] Autoscaling tests passing --- .../UsingTheReadWriteSplittingPlugin.md | 2 + .../jdbc/HikariPooledConnectionProvider.java | 31 ++--- .../software/amazon/jdbc/util/CacheMap.java | 112 ++++++++---------- .../container/tests/AutoscalingTests.java | 74 ++---------- 4 files changed, 70 insertions(+), 149 deletions(-) diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md index 479fad5ba..4db7c0980 100644 --- a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md +++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md @@ -41,6 +41,8 @@ The wrapper driver currently uses [Hikari](https://github.com/brettwooldridge/Hi You can optionally pass in a `HikariPoolMapping` function as a second parameter to the `HikariPooledConnectionProvider`. This allows you to decide when new connection pools should be created by defining what is included in the pool map key. A new pool will be created each time a new connection is requested with a unique key. By default, a new pool will be created for each unique instance-user combination. If you would like to define a different key system, you should pass in a `HikariPoolMapping` function defining this logic. Please see [ReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java) for an example of how to configure the pool map key. +> :warning: If you do not include the username in your HikariPoolMapping function, connection pools may be shared between different users. + 2. Call `ConnectionProviderManager.setConnectionProvider`, passing in the `HikariPooledConnectionProvider` you created in step 1. 3. 
Continue as normal: create connections and use them as needed. diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 1a721e30e..d6693abf4 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -48,11 +48,13 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; private static final RdsUtils rdsUtils = new RdsUtils(); - private static final CacheMap databasePools = new CacheMap<>(); - private static long poolExpirationNanos = TimeUnit.MINUTES.toNanos(30); + private static final CacheMap databasePools = new CacheMap<>( + (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0, + (hikariDataSource) -> hikariDataSource.close() + ); + private static long poolExpirationCheckNanos = TimeUnit.MINUTES.toNanos(30); private final HikariPoolConfigurator poolConfigurator; private final HikariPoolMapping poolMapping; - protected int retries = 10; /** * {@link HikariPooledConnectionProvider} constructor. This class can be passed to @@ -70,15 +72,13 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, * HikariConfig. */ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator) { - this(hikariPoolConfigurator, null, poolExpirationNanos); + this(hikariPoolConfigurator, null); } - public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping poolMapping) { - this(hikariPoolConfigurator, poolMapping, poolExpirationNanos); - } - - public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator, long poolExpirationNanos) { - this(hikariPoolConfigurator, null, poolExpirationNanos); + public HikariPooledConnectionProvider( + HikariPoolConfigurator hikariPoolConfigurator, long expirationCheckNanos) { + this(hikariPoolConfigurator, null); + poolExpirationCheckNanos = expirationCheckNanos; } /** @@ -100,12 +100,9 @@ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigura * generated for each unique key returned by this function. 
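A simplified sketch of the cache wiring introduced above, mirroring the `databasePools` construction but using plain `String` keys instead of `PoolKey` for brevity: the first callback keeps a pool alive while it still has active connections, and the second closes a pool once the cache disposes of it.

```java
import com.zaxxer.hikari.HikariDataSource;
import software.amazon.jdbc.util.CacheMap;

final class PoolCacheSketch {
  // A pool whose TTL has passed is still treated as valid while it has active
  // connections; when the cache finally evicts an entry, the pool is closed.
  private static final CacheMap<String, HikariDataSource> pools = new CacheMap<>(
      (ds) -> ds.getHikariPoolMXBean().getActiveConnections() > 0, // IsItemValidFunc
      (ds) -> ds.close());                                         // ItemDisposalFunc
}
```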
*/ public HikariPooledConnectionProvider( - HikariPoolConfigurator hikariPoolConfigurator, - HikariPoolMapping mapping, - long poolExpirationNanos) { + HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping mapping) { this.poolConfigurator = hikariPoolConfigurator; this.poolMapping = mapping; - this.poolExpirationNanos = poolExpirationNanos; } @Override @@ -169,11 +166,7 @@ public Connection connect( final HikariDataSource ds = databasePools.computeIfAbsent( new PoolKey(hostSpec.getUrl(), getPoolKey(hostSpec, props)), (lambdaPoolKey) -> createHikariDataSource(protocol, hostSpec, props), - poolExpirationNanos, - (hikariDataSource) -> { - int numConnections = hikariDataSource.getHikariPoolMXBean().getActiveConnections(); - return numConnections == 0; - } + poolExpirationCheckNanos ); ds.setPassword(props.getProperty(PropertyDefinition.PASSWORD.name)); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index c73562b97..6510899c9 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -25,67 +25,65 @@ public class CacheMap { - private final Map> cache = new ConcurrentHashMap<>(); + private final Map cache = new ConcurrentHashMap<>(); private long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); private final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos); + private final IsItemValidFunc isItemValidFunc; + private final ItemDisposalFunc itemDisposalFunc; public CacheMap() { + this.isItemValidFunc = null; + this.itemDisposalFunc = null; } - public CacheMap(final long cleanupIntervalNanos) { - this.cleanupIntervalNanos = cleanupIntervalNanos; + public CacheMap( + final IsItemValidFunc isItemValidFunc, + final ItemDisposalFunc itemDisposalFunc) { + this.isItemValidFunc = isItemValidFunc; + this.itemDisposalFunc = itemDisposalFunc; } public V get(final K key) { - final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv); + final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv); return cacheItem == null ? null : cacheItem.item; } public V getWithExtendExpiration(final K key, final long itemExpirationNano) { - final CacheItem cacheItem = cache.computeIfPresent(key, + final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv.withExtendExpiration(itemExpirationNano)); return cacheItem == null ? null : cacheItem.item; } public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { - final CacheItem cacheItem = cache.compute(key, + final CacheItem cacheItem = cache.compute(key, (kk, vv) -> (vv == null || vv.isExpired()) - ? new CacheItem<>(defaultItemValue, System.nanoTime() + itemExpirationNano) + ? 
new CacheItem(defaultItemValue, System.nanoTime() + itemExpirationNano) : vv); return cacheItem.item; } public void put(final K key, final V item, final long itemExpirationNano) { - cache.put(key, new CacheItem<>(item, System.nanoTime() + itemExpirationNano)); + cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); cleanUp(); } public void putIfAbsent(final K key, final V item, final long itemExpirationNano) { - cache.putIfAbsent(key, new CacheItem<>(item, System.nanoTime() + itemExpirationNano)); + cache.putIfAbsent(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); cleanUp(); } - public V computeIfAbsent( - final K key, - Function mappingFunction, - final long itemExpirationNano) { - return computeIfAbsent(key, mappingFunction, itemExpirationNano, null); - } - public V computeIfAbsent( final K key, Function mappingFunction, - final long itemExpirationNano, - final ItemExpiryFunc itemExpiryFunc) { + final long itemExpirationNano) { - final CacheItem cacheItem = cache.computeIfAbsent( + cleanUp(); + final CacheItem cacheItem = cache.computeIfAbsent( key, - k -> new CacheItem<>( + k -> new CacheItem( mappingFunction.apply(k), - System.nanoTime() + itemExpirationNano, - itemExpiryFunc)); - cleanUp(); - return cacheItem.isExpired() ? null : cacheItem.withExtendExpiration(itemExpirationNano).item; + System.nanoTime() + itemExpirationNano)); + return cacheItem.withExtendExpiration(itemExpirationNano).item; } public void remove(final K key) { @@ -99,7 +97,7 @@ public void clear() { public Map getEntries() { final Map entries = new HashMap<>(); - for (final Map.Entry> entry : this.cache.entrySet()) { + for (final Map.Entry entry : this.cache.entrySet()) { entries.put(entry.getKey(), entry.getValue().item); } return entries; @@ -110,64 +108,52 @@ public int size() { } private void cleanUp() { - if (this.cleanupTimeNanos.get() < System.nanoTime()) { - this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); - cache.forEach((key, value) -> { - if (value == null || value.isExpired()) { - if (value != null) { - if (value.onExpiry()) { - cache.remove(key); - } - } else { - cache.remove(key); - } - } - }); + if (this.cleanupTimeNanos.get() > System.nanoTime()) { + return; } + + this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); + cache.forEach((key, value) -> { + if (value != null && !value.isExpired()) { + return; + } + + cache.remove(key); + if (value != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(value.item); + } + }); + } + + public interface IsItemValidFunc { + boolean isValid(V item); } - public interface ItemExpiryFunc { - boolean onExpiry(V item); + public interface ItemDisposalFunc { + void dispose(V item); } - private static class CacheItem { + private class CacheItem { private final V item; private long expirationTime; - final ItemExpiryFunc onExpiryFunc; - public CacheItem(final V item, final long expirationTime) { - this(item, expirationTime, null); - } - - public CacheItem(final V item, final long expirationTime, final ItemExpiryFunc onExpiryFunc) { this.item = item; this.expirationTime = expirationTime; - this.onExpiryFunc = onExpiryFunc; } boolean isExpired() { + if (isItemValidFunc != null) { + return System.nanoTime() > expirationTime && !isItemValidFunc.isValid(this.item); + } return System.nanoTime() > expirationTime; } - public CacheItem withExtendExpiration(final long itemExpirationNano) { + public CacheItem withExtendExpiration(final long itemExpirationNano) { this.expirationTime = 
System.nanoTime() + itemExpirationNano; return this; } - public boolean onExpiry() { - if (this.onExpiryFunc == null) { - return true; - } - - try { - return this.onExpiryFunc.onExpiry(this.item); - } catch (Exception ex) { - // ignore - return true; - } - } - @Override public int hashCode() { final int prime = 31; @@ -187,7 +173,7 @@ public boolean equals(final Object obj) { if (getClass() != obj.getClass()) { return false; } - final CacheItem other = (CacheItem) obj; + final CacheItem other = (CacheItem) obj; if (item == null) { return other.item == null; } else { diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java index 290574d84..2c4cfa756 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -19,7 +19,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import com.zaxxer.hikari.HikariConfig; @@ -104,7 +103,8 @@ protected HikariPoolConfigurator getHikariConfig(int maxPoolSize) { }; } - // Old connection detects deleted instance when setReadOnly(true) is called + // Call setReadOnly on a ConnectionWrapper with an internal readerConnection to the deleted + // reader. The driver should close the deleted instance pool and connect to a different reader. @TestTemplate public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() throws SQLException, InterruptedException { @@ -116,8 +116,9 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); final List instances = testInfo.getDatabaseInfo().getInstances(); + final long poolExpirationNanos = TimeUnit.MINUTES.toNanos(3); final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(getHikariConfig(instances.size())); + new HikariPooledConnectionProvider(getHikariConfig(instances.size()), poolExpirationNanos); ConnectionProviderManager.setConnectionProvider(provider); final List connections = new ArrayList<>(); @@ -148,10 +149,12 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() auroraUtil.deleteInstance(newInstance); } - // Should detect that the reader was deleted and remove the associated pool newInstanceConn.setReadOnly(true); + // Connection pool cache should have hit the cleanup threshold and removed the pool for the + // deleted instance. 
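The expiration rule this relies on can be summarized in isolation; the sketch below inlines the `IsItemValidFunc` used for Hikari pools. A pool only counts as expired once its TTL has elapsed and it is idle, so an in-use pool survives cleanup passes.

```java
import com.zaxxer.hikari.HikariDataSource;

final class ExpirationRuleSketch {
  // TTL elapsed alone is not enough: a pool with active connections is kept,
  // and it only becomes expired once it is both past its TTL and idle.
  static boolean isExpired(long expirationTimeNanos, HikariDataSource ds) {
    final boolean ttlElapsed = System.nanoTime() > expirationTimeNanos;
    final boolean inUse = ds.getHikariPoolMXBean().getActiveConnections() > 0;
    return ttlElapsed && !inUse;
  }
}
```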
assertFalse(provider.getHosts().stream() .anyMatch((url) -> url.equals(newInstance.getUrl()))); + assertEquals(instances.size(), provider.getHosts().size()); } finally { for (Connection connection : connections) { connection.close(); @@ -161,69 +164,6 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() } } - // User tries to directly connect to the deleted instance URL - // TODO: Hikari throws an NPE when closing the connection to the instance that is now deleted - @TestTemplate - public void test_pooledConnectionAutoScaling_newConnectionToDeletedReader() - throws SQLException, InterruptedException { - final Properties props = getProps(); - final long topologyRefreshRateMs = 5000; - ReadWriteSplittingPlugin.READER_HOST_SELECTOR_STRATEGY.set(props, "leastConnections"); - AuroraHostListProvider.CLUSTER_TOPOLOGY_REFRESH_RATE_MS.set(props, - Long.toString(topologyRefreshRateMs)); - - final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); - final List instances = testInfo.getDatabaseInfo().getInstances(); - final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider(getHikariConfig(instances.size() * 5)); - ConnectionProviderManager.setConnectionProvider(provider); - - final List connections = new ArrayList<>(); - try { - for (int i = 1; i < instances.size(); i++) { - final String connString = ConnectionStringHelper.getWrapperUrl(instances.get(i)); - final Connection conn1 = DriverManager.getConnection(connString, props); - connections.add(conn1); - final Connection conn2 = DriverManager.getConnection(connString, props); - connections.add(conn2); - } - - final Connection newInstanceConn; - final TestInstanceInfo newInstance = - auroraUtil.createInstance("auto-scaling-instance"); - try { - newInstanceConn = - DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(), props); - connections.add(newInstanceConn); - Thread.sleep(topologyRefreshRateMs); - newInstanceConn.setReadOnly(true); - final String readerId = auroraUtil.queryInstanceId(newInstanceConn); - - assertEquals(newInstance.getInstanceId(), readerId); - // Verify that there is a pool for the new instance - assertTrue(provider.getHosts().stream() - .anyMatch((url) -> url.equals(newInstance.getUrl()))); - } finally { - auroraUtil.deleteInstance(newInstance); - } - - Thread.sleep(topologyRefreshRateMs); - assertThrows(SQLException.class, () -> - DriverManager.getConnection(ConnectionStringHelper.getWrapperUrl(newInstance), props)); - - // Verify that the pool for the deleted instance is now removed - assertFalse(provider.getHosts().stream() - .anyMatch((url) -> url.equals(newInstance.getUrl()))); - } finally { - for (Connection connection : connections) { - // Hikari throws NPE when closing the connection to the deleted instance - connection.close(); - } - ConnectionProviderManager.releaseResources(); - ConnectionProviderManager.resetProvider(); - } - } - // Attempt to use a connection to an instance that is now deleted, failover plugin is loaded @TestTemplate public void test_pooledConnectionAutoScaling_failoverFromDeletedReader() From 6f25a83d335fe0f6acac6e86e1a377fe15d4aa4c Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 17 May 2023 09:12:37 -0700 Subject: [PATCH 06/21] Remove notifyNodeListChanged from rw plugin --- .../ReadWriteSplittingPlugin.java | 55 +++++++------------ .../src/main/resources/messages.properties | 2 + .../container/tests/AutoscalingTests.java | 3 + 3 files changed, 25 insertions(+), 35 deletions(-) diff --git 
a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java index fbeb5bd08..67abc91b1 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java @@ -52,7 +52,6 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin add("initHostProvider"); add("connect"); add("notifyConnectionChanged"); - add("notifyNodeListChanged"); add("Connection.setReadOnly"); } }); @@ -180,36 +179,6 @@ public OldConnectionSuggestedAction notifyConnectionChanged( return OldConnectionSuggestedAction.NO_OPINION; } - @Override - public void notifyNodeListChanged(final Map> changes) { - for (Map.Entry> entry : changes.entrySet()) { - EnumSet hostChanges = entry.getValue(); - if (!hostChanges.contains(NodeChangeOptions.NODE_DELETED)) { - continue; - } - - if (writerHostSpec != null && writerHostSpec.getUrl().equals(entry.getKey())) { - try { - writerConnection.close(); - } catch (SQLException e) { - // Do nothing - } - writerConnection = null; - writerHostSpec = null; - } - - if (readerHostSpec != null && readerHostSpec.getUrl().equals(entry.getKey())) { - try { - readerConnection.close(); - } catch (SQLException e) { - // Do nothing - } - readerConnection = null; - readerHostSpec = null; - } - } - } - @Override public T execute( final Class resultClass, @@ -446,9 +415,25 @@ private synchronized void switchToReaderConnection(final List hosts) if (!isConnectionUsable(this.readerConnection)) { initializeReaderConnection(hosts); } else { - switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec); - LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", - new Object[] {this.readerHostSpec.getUrl()})); + try { + switchCurrentConnectionTo(this.readerConnection, this.readerHostSpec); + LOGGER.finer(() -> Messages.get("ReadWriteSplittingPlugin.switchedFromWriterToReader", + new Object[] {this.readerHostSpec.getUrl()})); + } catch (SQLException e) { + if (e.getMessage() != null) { + LOGGER.warning( + () -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause", + new Object[] {this.readerHostSpec.getUrl(), e.getMessage()})); + } else { + LOGGER.warning(() -> Messages.get("ReadWriteSplittingPlugin.errorSwitchingToCachedReader", + new Object[] {this.readerHostSpec.getUrl()})); + } + + this.readerConnection.close(); + this.readerConnection = null; + this.readerHostSpec = null; + initializeReaderConnection(hosts); + } } } @@ -495,7 +480,7 @@ private void getNewReaderConnection() throws SQLException { readerHost = hostSpec; break; } catch (final SQLException e) { - LOGGER.config( + LOGGER.warning( () -> Messages.get( "ReadWriteSplittingPlugin.failedToConnectToReader", new Object[] { diff --git a/wrapper/src/main/resources/messages.properties b/wrapper/src/main/resources/messages.properties index 6002821b4..5868765bc 100644 --- a/wrapper/src/main/resources/messages.properties +++ b/wrapper/src/main/resources/messages.properties @@ -195,6 +195,8 @@ PropertyUtils.failedToSetPropertyWithReason=Failed to set property ''{0}'' on ta # Read Write Splitting Plugin ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection=setReadOnly cannot be called on a closed connection. 
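A hypothetical, condensed version of the `switchToReaderConnection` fallback added above; the method and field names here are invented for illustration. The point is the shape of the recovery: close the unusable cached reader, drop the reference, and open a fresh reader rather than propagating the error.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

final class CachedReaderFallbackSketch {
  private Connection readerConnection; // the cached reader, possibly stale

  Connection reader(String newReaderUrl) throws SQLException {
    try {
      if (readerConnection == null || readerConnection.isClosed()) {
        throw new SQLException("cached reader unusable");
      }
      return readerConnection;
    } catch (SQLException e) {
      try {
        if (readerConnection != null) {
          readerConnection.close();
        }
      } catch (SQLException ignored) {
        // best-effort close, matching the plugin's quiet handling
      }
      readerConnection = DriverManager.getConnection(newReaderUrl); // fresh reader
      return readerConnection;
    }
  }
}
```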
+ReadWriteSplittingPlugin.errorSwitchingToCachedReader=An error occurred while trying to switch to a cached reader connection to ''{0}''. The driver will attempt to establish a new reader connection. +ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause=An error occurred while trying to switch to a cached reader connection to ''{0}''. Error message: ''{1}''. The driver will attempt to establish a new reader connection. ReadWriteSplittingPlugin.errorSwitchingToReader=An error occurred while trying to switch to a reader connection. ReadWriteSplittingPlugin.errorSwitchingToWriter=An error occurred while trying to switch to a writer connection. ReadWriteSplittingPlugin.closingInternalConnections=Closing all internal connections except for the current one. diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java index 2c4cfa756..966da3be5 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -152,6 +152,9 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() newInstanceConn.setReadOnly(true); // Connection pool cache should have hit the cleanup threshold and removed the pool for the // deleted instance. + String instanceId = auroraUtil.queryInstanceId(newInstanceConn); + assertNotEquals(instances.get(0).getInstanceId(), instanceId); + assertNotEquals(newInstance.getInstanceId(), instanceId); assertFalse(provider.getHosts().stream() .anyMatch((url) -> url.equals(newInstance.getUrl()))); assertEquals(instances.size(), provider.getHosts().size()); From b9f31519c728391fa134eec7cee8bd0e950e6696 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 17 May 2023 09:42:40 -0700 Subject: [PATCH 07/21] Cleanup --- .../benchmarks/ConnectionPluginManagerBenchmarks.java | 2 +- .../software/amazon/jdbc/benchmarks/PluginBenchmarks.java | 2 +- .../amazon/ReadWriteSplittingPostgresExample.java | 6 +++--- .../java/software/amazon/jdbc/ConnectionProvider.java | 3 +-- .../software/amazon/jdbc/ConnectionProviderManager.java | 2 -- .../amazon/jdbc/DataSourceConnectionProvider.java | 3 +-- .../software/amazon/jdbc/DriverConnectionProvider.java | 3 +-- .../amazon/jdbc/HikariPooledConnectionProvider.java | 3 +-- .../amazon/jdbc/plugin/DefaultConnectionPlugin.java | 8 ++++---- .../readwritesplitting/ReadWriteSplittingPlugin.java | 4 ---- .../amazon/jdbc/HikariPooledConnectionProviderTest.java | 6 ++---- 11 files changed, 15 insertions(+), 27 deletions(-) diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java index fb0ea592b..3c81dd755 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java @@ -110,7 +110,7 @@ public void setUpIteration() throws Exception { when(mockConnectionProvider.connect(anyString(), any(Properties.class))).thenReturn( mockConnection); when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), - any(Properties.class), anyBoolean())).thenReturn(mockConnection); + any(Properties.class))).thenReturn(mockConnection); when(mockConnection.createStatement()).thenReturn(mockStatement); 
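With the `isInitialConnection` flag removed again, a provider implementation targets the four-argument `connect`; the sketch below shows the trimmed shape. The URL composition is an assumption made purely for illustration, not the driver's actual scheme handling.

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.util.Properties;

import software.amazon.jdbc.HostSpec;
import software.amazon.jdbc.dialect.Dialect;

final class PassthroughConnectSketch {
  // Matches the reduced parameter list: protocol, dialect, host, properties.
  Connection connect(String protocol, Dialect dialect, HostSpec hostSpec, Properties props)
      throws SQLException {
    return DriverManager.getConnection(protocol + hostSpec.getUrl(), props);
  }
}
```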
when(mockStatement.executeQuery(anyString())).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(true, true, false); diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java index e1615e83c..66c89c87b 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/PluginBenchmarks.java @@ -108,7 +108,7 @@ public void setUpIteration() throws Exception { when(mockConnectionProvider.connect(anyString(), any(Properties.class))).thenReturn( mockConnection); when(mockConnectionProvider.connect(anyString(), any(Dialect.class), any(HostSpec.class), - any(Properties.class), anyBoolean())).thenReturn(mockConnection); + any(Properties.class))).thenReturn(mockConnection); when(mockConnection.createStatement()).thenReturn(mockStatement); when(mockStatement.executeQuery(anyString())).thenReturn(mockResultSet); when(mockResultSet.next()).thenReturn(true, true, false); diff --git a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java index a846534d3..33a0e8dd6 100644 --- a/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java +++ b/examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java @@ -164,9 +164,9 @@ private static HikariConfig getHikariConfig(HostSpec hostSpec, Properties props) // This method is an optional parameter to `ConnectionProviderManager.setConnectionProvider`. // It can be omitted if you do not require it. private static String getPoolKey(HostSpec hostSpec, Properties props) { - // Include the URL and somePropertyValue in the connection pool key so that a new connection - // pool will be opened for each different instance-user-somePropertyValue combination. - // (Note that the user will automatically be added to the key). + // Include the URL, user, and somePropertyValue in the connection pool key so that a new + // connection pool will be opened for each different instance-user-somePropertyValue + // combination. 
final String user = props.getProperty(PropertyDefinition.USER.name); final String somePropertyValue = props.getProperty("somePropertyValue"); return hostSpec.getUrl() + user + somePropertyValue; diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 897ba69fa..62d6f1e55 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -83,8 +83,7 @@ Connection connect( @NonNull String protocol, @NonNull Dialect dialect, @NonNull HostSpec hostSpec, - @NonNull Properties props, - boolean isInitialConnection) + @NonNull Properties props) throws SQLException; /** diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java index 5c3a7dc5c..74cadf021 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProviderManager.java @@ -17,9 +17,7 @@ package software.amazon.jdbc; import java.sql.SQLException; -import java.util.EnumSet; import java.util.List; -import java.util.Map; import java.util.Properties; import java.util.concurrent.locks.ReentrantReadWriteLock; import software.amazon.jdbc.cleanup.CanReleaseResources; diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 814d6f12c..392be1f64 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -123,8 +123,7 @@ public Connection connect( final @NonNull String protocol, final @NonNull Dialect dialect, final @NonNull HostSpec hostSpec, - final @NonNull Properties props, - final boolean isInitialConnection) + final @NonNull Properties props) throws SQLException { final Properties copy = PropertyUtils.copyProperties(props); diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java index 065db297a..fa5e7a463 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java @@ -102,8 +102,7 @@ public Connection connect( final @NonNull String protocol, final @NonNull Dialect dialect, final @NonNull HostSpec hostSpec, - final @NonNull Properties props, - final boolean isInitialConnection) + final @NonNull Properties props) throws SQLException { final String databaseName = PropertyDefinition.DATABASE.getString(props) != null diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index d6693abf4..498431b47 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -159,8 +159,7 @@ public Connection connect( @NonNull String protocol, @NonNull Dialect dialect, @NonNull HostSpec hostSpec, - @NonNull Properties props, - boolean isInitialConnection) + @NonNull Properties props) throws SQLException { final HikariDataSource ds = databasePools.computeIfAbsent( diff --git 
a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index aed2722b4..d1be6313d 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -145,18 +145,17 @@ public Connection connect( // It's guaranteed that this plugin is always the last in plugin chain so connectFunc can be // ignored. - return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connProvider); + return connectInternal(driverProtocol, hostSpec, props, connProvider); } private Connection connectInternal( String driverProtocol, HostSpec hostSpec, Properties props, - boolean isInitialConnection, ConnectionProvider connProvider) throws SQLException { final Connection conn = connProvider.connect( - driverProtocol, this.pluginService.getDialect(), hostSpec, props, isInitialConnection); + driverProtocol, this.pluginService.getDialect(), hostSpec, props); this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE); this.pluginService.updateDialect(conn); @@ -176,7 +175,7 @@ public Connection forceConnect( // It's guaranteed that this plugin is always the last in plugin chain so forceConnectFunc can be // ignored. - return connectInternal(driverProtocol, hostSpec, props, isInitialConnection, connProvider); + return connectInternal(driverProtocol, hostSpec, props, connProvider); } @Override @@ -226,6 +225,7 @@ public OldConnectionSuggestedAction notifyConnectionChanged( @Override public void notifyNodeListChanged(final Map> changes) { + // do nothing } List parseMultiStatementQueries(String query) { diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java index 67abc91b1..e8ddcbd10 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/readwritesplitting/ReadWriteSplittingPlugin.java @@ -22,7 +22,6 @@ import java.util.EnumSet; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.logging.Logger; @@ -63,7 +62,6 @@ public class ReadWriteSplittingPlugin extends AbstractConnectionPlugin private volatile boolean inReadWriteSplit = false; private HostListProviderService hostListProviderService; private Connection writerConnection; - private HostSpec writerHostSpec; private Connection readerConnection; private HostSpec readerHostSpec; @@ -252,7 +250,6 @@ private void getNewWriterConnection(final HostSpec writerHostSpec) throws SQLExc private void setWriterConnection(final Connection writerConnection, final HostSpec writerHostSpec) { this.writerConnection = writerConnection; - this.writerHostSpec = writerHostSpec; LOGGER.finest( () -> Messages.get( "ReadWriteSplittingPlugin.setWriterConnection", @@ -526,7 +523,6 @@ private void closeConnectionIfIdle(final Connection internalConnection) { internalConnection.close(); if (internalConnection == writerConnection) { writerConnection = null; - writerHostSpec = null; } if (internalConnection == readerConnection) { diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java 
b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index 75728ed47..106315eb1 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -74,8 +74,7 @@ void testConnectWithDefaultMapping() throws SQLException { Properties props = new Properties(); props.setProperty(PropertyDefinition.USER.name, "username"); props.setProperty(PropertyDefinition.PASSWORD.name, "password"); - try (Connection conn = provider.connect( - "protocol", mockDialect, mockHostSpec, props, true)) { + try (Connection conn = provider.connect("protocol", mockDialect, mockHostSpec, props)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set hosts = provider.getHosts(); @@ -102,8 +101,7 @@ void testConnectWithCustomMapping() throws SQLException { Properties props = new Properties(); props.setProperty(PropertyDefinition.USER.name, "username"); props.setProperty(PropertyDefinition.PASSWORD.name, "password"); - try (Connection conn = provider.connect( - "protocol", mockDialect, mockHostSpec, props, true)) { + try (Connection conn = provider.connect("protocol", mockDialect, mockHostSpec, props)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set keys = provider.getKeys(); From 9c900a72008e31994c8389b939e4ac02d74fab83 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 17 May 2023 10:03:40 -0700 Subject: [PATCH 08/21] More cleanup --- .../ConnectionPluginManagerBenchmarks.java | 24 +++++++++---------- .../amazon/jdbc/ConnectionProvider.java | 2 -- .../jdbc/DataSourceConnectionProvider.java | 1 - .../amazon/jdbc/DriverConnectionProvider.java | 1 - .../jdbc/plugin/DefaultConnectionPlugin.java | 9 +++---- .../software/amazon/jdbc/util/CacheMap.java | 18 +++++++------- .../refactored/TestEnvironmentInfo.java | 9 ------- 7 files changed, 23 insertions(+), 41 deletions(-) diff --git a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java index 3c81dd755..661189023 100644 --- a/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java +++ b/benchmarks/src/jmh/java/software/amazon/jdbc/benchmarks/ConnectionPluginManagerBenchmarks.java @@ -17,12 +17,21 @@ package software.amazon.jdbc.benchmarks; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.openMocks; +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.Collections; +import java.util.EnumSet; +import java.util.List; +import java.util.Properties; +import java.util.concurrent.TimeUnit; import org.mockito.Mock; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.BenchmarkMode; @@ -51,22 +60,11 @@ import software.amazon.jdbc.PluginManagerService; import software.amazon.jdbc.PluginService; import software.amazon.jdbc.PropertyDefinition; +import software.amazon.jdbc.benchmarks.testplugin.BenchmarkPluginFactory; import software.amazon.jdbc.dialect.Dialect; import 
software.amazon.jdbc.profile.DriverConfigurationProfiles; -import software.amazon.jdbc.benchmarks.testplugin.BenchmarkPluginFactory; import software.amazon.jdbc.wrapper.ConnectionWrapper; -import java.sql.Connection; -import java.sql.ResultSet; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.ArrayList; -import java.util.Collections; -import java.util.EnumSet; -import java.util.List; -import java.util.Properties; -import java.util.concurrent.TimeUnit; - @State(Scope.Benchmark) @Fork(3) @Warmup(iterations = 3) diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 62d6f1e55..5ee4289b0 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -18,9 +18,7 @@ import java.sql.Connection; import java.sql.SQLException; -import java.util.EnumSet; import java.util.List; -import java.util.Map; import java.util.Properties; import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.dialect.Dialect; diff --git a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java index 392be1f64..4a00655cc 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DataSourceConnectionProvider.java @@ -22,7 +22,6 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; diff --git a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java index fa5e7a463..4c97729d5 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/DriverConnectionProvider.java @@ -19,7 +19,6 @@ import java.sql.Connection; import java.sql.SQLException; import java.util.Collections; -import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; diff --git a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java index d1be6313d..16654132f 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java +++ b/wrapper/src/main/java/software/amazon/jdbc/plugin/DefaultConnectionPlugin.java @@ -149,13 +149,10 @@ public Connection connect( } private Connection connectInternal( - String driverProtocol, - HostSpec hostSpec, - Properties props, - ConnectionProvider connProvider) + String driverProtocol, HostSpec hostSpec, Properties props, ConnectionProvider connProvider) throws SQLException { - final Connection conn = connProvider.connect( - driverProtocol, this.pluginService.getDialect(), hostSpec, props); + final Connection conn = + connProvider.connect(driverProtocol, this.pluginService.getDialect(), hostSpec, props); this.pluginService.setAvailability(hostSpec.asAliases(), HostAvailability.AVAILABLE); this.pluginService.updateDialect(conn); diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index 6510899c9..8d2ae0695 100644 --- 
a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -48,12 +48,6 @@ public V get(final K key) { return cacheItem == null ? null : cacheItem.item; } - public V getWithExtendExpiration(final K key, final long itemExpirationNano) { - final CacheItem cacheItem = cache.computeIfPresent(key, - (kk, vv) -> vv.isExpired() ? null : vv.withExtendExpiration(itemExpirationNano)); - return cacheItem == null ? null : cacheItem.item; - } - public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { final CacheItem cacheItem = cache.compute(key, (kk, vv) -> (vv == null || vv.isExpired()) @@ -62,6 +56,12 @@ public V get(final K key, final V defaultItemValue, final long itemExpirationNan return cacheItem.item; } + public V getWithExtendExpiration(final K key, final long itemExpirationNano) { + final CacheItem cacheItem = cache.computeIfPresent(key, + (kk, vv) -> vv.isExpired() ? null : vv.withExtendExpiration(itemExpirationNano)); + return cacheItem == null ? null : cacheItem.item; + } + public void put(final K key, final V item, final long itemExpirationNano) { cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); cleanUp(); @@ -73,9 +73,9 @@ public void putIfAbsent(final K key, final V item, final long itemExpirationNano } public V computeIfAbsent( - final K key, - Function mappingFunction, - final long itemExpirationNano) { + final K key, + Function mappingFunction, + final long itemExpirationNano) { cleanUp(); final CacheItem cacheItem = cache.computeIfAbsent( diff --git a/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java b/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java index b4e4e1d6f..adba3e4ab 100644 --- a/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java +++ b/wrapper/src/test/java/integration/refactored/TestEnvironmentInfo.java @@ -32,7 +32,6 @@ public class TestEnvironmentInfo { private TestProxyDatabaseInfo proxyDatabaseInfo; private String databaseEngine; private String databaseEngineVersion; - private String databaseInstanceClass; public TestDatabaseInfo getDatabaseInfo() { return this.databaseInfo; @@ -50,10 +49,6 @@ public String getDatabaseEngineVersion() { return databaseEngineVersion; } - public String getDatabaseInstanceClass() { - return databaseInstanceClass; - } - public TestEnvironmentRequest getRequest() { return this.request; } @@ -110,10 +105,6 @@ public void setDatabaseEngineVersion(String databaseEngineVersion) { this.databaseEngineVersion = databaseEngineVersion; } - public void setDatabaseInstanceClass(String databaseInstanceClass) { - this.databaseInstanceClass = databaseInstanceClass; - } - public void setAwsAccessKeyId(String awsAccessKeyId) { this.awsAccessKeyId = awsAccessKeyId; } From 414a55a8656d8d406b20d77346e59f30ea4ec1d9 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 17 May 2023 14:14:33 -0700 Subject: [PATCH 09/21] Add HikariConnectionProvider unit tests --- .../jdbc/HikariPooledConnectionProvider.java | 8 +- .../HikariPooledConnectionProviderTest.java | 113 ++++++++++++++++-- 2 files changed, 113 insertions(+), 8 deletions(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index 498431b47..ae436a2d7 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ 
b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -48,7 +48,7 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; private static final RdsUtils rdsUtils = new RdsUtils(); - private static final CacheMap databasePools = new CacheMap<>( + private static CacheMap databasePools = new CacheMap<>( (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0, (hikariDataSource) -> hikariDataSource.close() ); @@ -335,4 +335,10 @@ public String toString() { } } + + // For testing purposes only + void setDatabasePools(CacheMap connectionPools) { + databasePools = connectionPools; + } + } diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index 106315eb1..c284ab48a 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -17,19 +17,27 @@ package software.amazon.jdbc; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import com.zaxxer.hikari.HikariConfig; import com.zaxxer.hikari.HikariDataSource; +import com.zaxxer.hikari.HikariPoolMXBean; import java.sql.Connection; import java.sql.SQLException; +import java.util.ArrayList; import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Properties; import java.util.Set; +import java.util.concurrent.TimeUnit; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -37,6 +45,7 @@ import org.mockito.MockitoAnnotations; import software.amazon.jdbc.HikariPooledConnectionProvider.PoolKey; import software.amazon.jdbc.dialect.Dialect; +import software.amazon.jdbc.util.CacheMap; class HikariPooledConnectionProviderTest { @Mock Connection mockConnection; @@ -44,14 +53,58 @@ class HikariPooledConnectionProviderTest { @Mock HostSpec mockHostSpec; @Mock HikariConfig mockConfig; @Mock Dialect mockDialect; + @Mock HikariDataSource dsWithNoConnections; + @Mock HikariDataSource dsWith1Connection; + @Mock HikariDataSource dsWith2Connections; + @Mock HikariPoolMXBean mxBeanWithNoConnections; + @Mock HikariPoolMXBean mxBeanWith1Connection; + @Mock HikariPoolMXBean mxBeanWith2Connections; + private static final String LEAST_CONNECTIONS = "leastConnections"; + private final int port = 5432; + private final String user1 = "user1"; + private final String user2 = "user2"; + private final String password = "password"; + private final String db = "mydb"; + private final String writerUrlNoConnections = "writerWithNoConnections.XYZ.us-east-1.rds.amazonaws.com"; + private final HostSpec writerHostNoConnections = new HostSpec(writerUrlNoConnections, port, HostRole.WRITER); + private final String readerUrl1Connection = "readerWith1connection.XYZ.us-east-1.rds.amazonaws.com"; + private final HostSpec readerHost1Connection = new HostSpec(readerUrl1Connection, port, 
HostRole.READER); + private final String readerUrl2Connections = "readerWith2connections.XYZ.us-east-1.rds.amazonaws.com"; + private final HostSpec readerHost2Connections = new HostSpec(readerUrl2Connections, port, HostRole.READER); + private final String protocol = "protocol://"; + + private List testHosts = getTestHosts(); + private final Properties defaultProps = getDefaultProps(); private AutoCloseable closeable; + private List getTestHosts() { + List hosts = new ArrayList<>(); + hosts.add(writerHostNoConnections); + hosts.add(readerHost1Connection); + hosts.add(readerHost2Connections); + return hosts; + } + + private Properties getDefaultProps() { + Properties props = new Properties(); + props.setProperty(PropertyDefinition.USER.name, user1); + props.setProperty(PropertyDefinition.PASSWORD.name, password); + props.setProperty(PropertyDefinition.DATABASE.name, db); + return props; + } + @BeforeEach void init() throws SQLException { closeable = MockitoAnnotations.openMocks(this); when(mockDataSource.getConnection()).thenReturn(mockConnection); when(mockConnection.isValid(any(Integer.class))).thenReturn(true); + when(dsWithNoConnections.getHikariPoolMXBean()).thenReturn(mxBeanWithNoConnections); + when(mxBeanWithNoConnections.getActiveConnections()).thenReturn(0); + when(dsWith1Connection.getHikariPoolMXBean()).thenReturn(mxBeanWith1Connection); + when(mxBeanWith1Connection.getActiveConnections()).thenReturn(1); + when(dsWith2Connections.getHikariPoolMXBean()).thenReturn(mxBeanWith2Connections); + when(mxBeanWith2Connections.getActiveConnections()).thenReturn(2); } @AfterEach @@ -64,7 +117,7 @@ void testConnectWithDefaultMapping() throws SQLException { when(mockHostSpec.getUrl()).thenReturn("url"); final Set expectedUrls = new HashSet<>(Collections.singletonList("url")); final Set expectedKeys = new HashSet<>( - Collections.singletonList(new PoolKey("url", "username"))); + Collections.singletonList(new PoolKey("url", user1))); final HikariPooledConnectionProvider provider = spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); @@ -72,9 +125,9 @@ void testConnectWithDefaultMapping() throws SQLException { doReturn(mockDataSource).when(provider).createHikariDataSource(any(), any(), any()); Properties props = new Properties(); - props.setProperty(PropertyDefinition.USER.name, "username"); - props.setProperty(PropertyDefinition.PASSWORD.name, "password"); - try (Connection conn = provider.connect("protocol", mockDialect, mockHostSpec, props)) { + props.setProperty(PropertyDefinition.USER.name, user1); + props.setProperty(PropertyDefinition.PASSWORD.name, password); + try (Connection conn = provider.connect(protocol, mockDialect, mockHostSpec, props)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); final Set hosts = provider.getHosts(); @@ -99,9 +152,9 @@ void testConnectWithCustomMapping() throws SQLException { doReturn(mockDataSource).when(provider).createHikariDataSource(any(), any(), any()); Properties props = new Properties(); - props.setProperty(PropertyDefinition.USER.name, "username"); - props.setProperty(PropertyDefinition.PASSWORD.name, "password"); - try (Connection conn = provider.connect("protocol", mockDialect, mockHostSpec, props)) { + props.setProperty(PropertyDefinition.USER.name, user1); + props.setProperty(PropertyDefinition.PASSWORD.name, password); + try (Connection conn = provider.connect(protocol, mockDialect, mockHostSpec, props)) { assertEquals(mockConnection, conn); assertEquals(1, provider.getHostCount()); 
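      // Editor's note (illustrative, not part of the original change): with the default
      // mapping exercised here, each internal pool is keyed by the host URL plus the
      // connecting user, which is why a single connection yields exactly one pool key,
      // new PoolKey("url", user1). A minimal sketch of that default keying, assuming a
      // hypothetical helper name:
      //
      //   PoolKey defaultPoolKey(HostSpec hostSpec, Properties props) {
      //     return new PoolKey(hostSpec.getUrl(), props.getProperty(PropertyDefinition.USER.name));
      //   }
      //
      // A custom HikariPoolMapping can replace this keying, e.g.
      // (hostSpec, properties) -> hostSpec.getUrl() + "+someUniqueKey", as exercised in
      // testConnectWithCustomMapping below.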
final Set keys = provider.getKeys(); @@ -110,4 +163,50 @@ void testConnectWithCustomMapping() throws SQLException { provider.releaseResources(); } + + @Test + public void testAcceptsUrl() { + final String clusterUrl = "my-database.cluster-XYZ.us-east-1.rds.amazonaws.com"; + final HikariPooledConnectionProvider connectionProvider = + new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + + assertTrue( + connectionProvider.acceptsUrl(protocol, new HostSpec(readerUrl2Connections), defaultProps)); + assertFalse( + connectionProvider.acceptsUrl(protocol, new HostSpec(clusterUrl), defaultProps)); + } + + @Test + public void testLeastConnectionsStrategy() throws SQLException { + final HikariPooledConnectionProvider connectionProvider = + new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + connectionProvider.setDatabasePools(getTestPoolMap()); + + assertThrows(UnsupportedOperationException.class, () -> + connectionProvider.getHostSpecByStrategy(testHosts, HostRole.READER, "random")); + HostSpec selectedHost = connectionProvider.getHostSpecByStrategy(testHosts, HostRole.READER, LEAST_CONNECTIONS); + // Other reader has 2 connections + assertEquals(readerUrl1Connection, selectedHost.getHost()); + } + + private CacheMap getTestPoolMap() { + CacheMap map = new CacheMap<>(); + map.put(new PoolKey(readerHost2Connections.getUrl(), user1), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + map.put(new PoolKey(readerHost2Connections.getUrl(), user2), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + map.put(new PoolKey(readerHost1Connection.getUrl(), user1), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + return map; + } + + @Test + public void testConfigurePool() { + final HikariPooledConnectionProvider connectionProvider = + new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + final String expectedJdbcUrl = + protocol + readerHost1Connection.getUrl() + db + "?database=" + db; + + connectionProvider.configurePool(mockConfig, protocol, readerHost1Connection, defaultProps); + verify(mockConfig).setJdbcUrl(expectedJdbcUrl); + verify(mockConfig).setUsername(user1); + verify(mockConfig).setPassword(password); + } } From d73bdddef38ad259737e851c33fd48e15ac66a62 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Wed, 17 May 2023 15:58:36 -0700 Subject: [PATCH 10/21] More tests --- .../container/tests/AutoscalingTests.java | 12 +++++++ .../HikariPooledConnectionProviderTest.java | 32 +++++++++++++------ 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java index 966da3be5..e842affd3 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -20,6 +20,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import com.zaxxer.hikari.HikariConfig; import integration.refactored.DatabaseEngineDeployment; @@ -116,6 +117,7 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() final TestEnvironmentInfo testInfo = TestEnvironment.getCurrent().getInfo(); final List instances = testInfo.getDatabaseInfo().getInstances(); + final int 
originalClusterSize = instances.size(); final long poolExpirationNanos = TimeUnit.MINUTES.toNanos(3); final HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider(getHikariConfig(instances.size()), poolExpirationNanos); @@ -149,6 +151,16 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection() auroraUtil.deleteInstance(newInstance); } + final long deletionCheckTimeout = System.nanoTime() + TimeUnit.MINUTES.toNanos(5); + while (System.nanoTime() < deletionCheckTimeout + && auroraUtil.getAuroraInstanceIds().size() != originalClusterSize) { + TimeUnit.SECONDS.sleep(5); + } + + if (auroraUtil.getAuroraInstanceIds().size() != originalClusterSize) { + fail("The deleted instance is still in the cluster topology"); + } + newInstanceConn.setReadOnly(true); // Connection pool cache should have hit the cleanup threshold and removed the pool for the // deleted instance. diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index c284ab48a..b3031a838 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -21,6 +21,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; @@ -167,24 +168,24 @@ void testConnectWithCustomMapping() throws SQLException { @Test public void testAcceptsUrl() { final String clusterUrl = "my-database.cluster-XYZ.us-east-1.rds.amazonaws.com"; - final HikariPooledConnectionProvider connectionProvider = + final HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); assertTrue( - connectionProvider.acceptsUrl(protocol, new HostSpec(readerUrl2Connections), defaultProps)); + provider.acceptsUrl(protocol, new HostSpec(readerUrl2Connections), defaultProps)); assertFalse( - connectionProvider.acceptsUrl(protocol, new HostSpec(clusterUrl), defaultProps)); + provider.acceptsUrl(protocol, new HostSpec(clusterUrl), defaultProps)); } @Test public void testLeastConnectionsStrategy() throws SQLException { - final HikariPooledConnectionProvider connectionProvider = + final HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); - connectionProvider.setDatabasePools(getTestPoolMap()); + provider.setDatabasePools(getTestPoolMap()); assertThrows(UnsupportedOperationException.class, () -> - connectionProvider.getHostSpecByStrategy(testHosts, HostRole.READER, "random")); - HostSpec selectedHost = connectionProvider.getHostSpecByStrategy(testHosts, HostRole.READER, LEAST_CONNECTIONS); + provider.getHostSpecByStrategy(testHosts, HostRole.READER, "random")); + HostSpec selectedHost = provider.getHostSpecByStrategy(testHosts, HostRole.READER, LEAST_CONNECTIONS); // Other reader has 2 connections assertEquals(readerUrl1Connection, selectedHost.getHost()); } @@ -199,14 +200,27 @@ private CacheMap getTestPoolMap() { @Test public void testConfigurePool() { - final HikariPooledConnectionProvider connectionProvider = + final HikariPooledConnectionProvider provider = new HikariPooledConnectionProvider((hostSpec, 
properties) -> mockConfig); final String expectedJdbcUrl = protocol + readerHost1Connection.getUrl() + db + "?database=" + db; - connectionProvider.configurePool(mockConfig, protocol, readerHost1Connection, defaultProps); + provider.configurePool(mockConfig, protocol, readerHost1Connection, defaultProps); verify(mockConfig).setJdbcUrl(expectedJdbcUrl); verify(mockConfig).setUsername(user1); verify(mockConfig).setPassword(password); } + + @Test + public void testConnectToDeletedInstance() throws SQLException { + final HikariPooledConnectionProvider provider = + spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); + + doReturn(mockDataSource).when(provider) + .createHikariDataSource(eq(protocol), eq(readerHost1Connection), eq(defaultProps)); + when(mockDataSource.getConnection()).thenThrow(SQLException.class); + + assertThrows(SQLException.class, + () -> provider.connect(protocol, mockDialect, readerHost1Connection, defaultProps)); + } } From 85d299b885cd4cd4d9e51857e2b945ac9f8d0a91 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Thu, 18 May 2023 13:32:10 -0700 Subject: [PATCH 11/21] Dispose of items when they are removed from the CacheMap - also added unit tests --- .../software/amazon/jdbc/util/CacheMap.java | 83 +++++-- .../amazon/jdbc/util/CacheMapTest.java | 206 ++++++++++++++++++ 2 files changed, 276 insertions(+), 13 deletions(-) create mode 100644 wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index 8d2ae0695..910ad7f38 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -44,32 +44,63 @@ public CacheMap( } public V get(final K key) { - final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv); - return cacheItem == null ? null : cacheItem.item; + final CacheItem cacheItem = cache.get(key); + if (cacheItem == null || cacheItem.isExpired()) { + cache.remove(key); + if (cacheItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + return null; + } else { + return cacheItem.item; + } } public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { - final CacheItem cacheItem = cache.compute(key, - (kk, vv) -> (vv == null || vv.isExpired()) - ? new CacheItem(defaultItemValue, System.nanoTime() + itemExpirationNano) - : vv); + CacheItem cacheItem = cache.get(key); + if (cacheItem == null || cacheItem.isExpired()) { + cache.remove(key); + if (cacheItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + cacheItem = new CacheItem(defaultItemValue, System.nanoTime() + itemExpirationNano); + cache.put(key, cacheItem); + } return cacheItem.item; } public V getWithExtendExpiration(final K key, final long itemExpirationNano) { - final CacheItem cacheItem = cache.computeIfPresent(key, - (kk, vv) -> vv.isExpired() ? null : vv.withExtendExpiration(itemExpirationNano)); - return cacheItem == null ? 
null : cacheItem.item; + final CacheItem cacheItem = cache.get(key); + if (cacheItem == null || cacheItem.isExpired()) { + cache.remove(key); + if (cacheItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + return null; + } else { + return cacheItem.withExtendExpiration(itemExpirationNano).item; + } } public void put(final K key, final V item, final long itemExpirationNano) { - cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); cleanUp(); + final CacheItem oldItem = cache.get(key); + if (oldItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(oldItem.item); + } + cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); } public void putIfAbsent(final K key, final V item, final long itemExpirationNano) { - cache.putIfAbsent(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); cleanUp(); + final CacheItem currentItem = cache.get(key); + if (currentItem == null || currentItem.isExpired()) { + cache.remove(key); + if (currentItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(currentItem.item); + } + } + cache.putIfAbsent(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); } public V computeIfAbsent( @@ -78,6 +109,13 @@ public V computeIfAbsent( final long itemExpirationNano) { cleanUp(); + final CacheItem currentItem = cache.get(key); + if (currentItem == null || currentItem.isExpired()) { + cache.remove(key); + if (currentItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(currentItem.item); + } + } final CacheItem cacheItem = cache.computeIfAbsent( key, k -> new CacheItem( @@ -87,11 +125,20 @@ public V computeIfAbsent( } public void remove(final K key) { - cache.remove(key); cleanUp(); + CacheItem cacheItem = cache.remove(key); + if (itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } } public void clear() { + for (K key : cache.keySet()) { + CacheItem cacheItem = cache.remove(key); + if (itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + } cache.clear(); } @@ -133,7 +180,7 @@ public interface ItemDisposalFunc { void dispose(V item); } - private class CacheItem { + class CacheItem { private final V item; private long expirationTime; @@ -186,4 +233,14 @@ public String toString() { return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]"; } } + + // For testing purposes + void setCleanupIntervalNanos(long cleanupIntervalNanos) { + this.cleanupIntervalNanos = cleanupIntervalNanos; + this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); + } + + Map getCache() { + return cache; + } } diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java new file mode 100644 index 000000000..a60c57cde --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java @@ -0,0 +1,206 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+class CacheMapTest {
+
+  @Mock CacheMap.ItemDisposalFunc<String> mockDisposalFunc;
+  @Mock CacheMap.IsItemValidFunc<String> mockIsItemValidFunc;
+  private AutoCloseable closeable;
+
+  @BeforeEach
+  void init() {
+    closeable = MockitoAnnotations.openMocks(this);
+  }
+
+  @AfterEach
+  void tearDown() throws Exception {
+    closeable.close();
+  }
+
+  @Test
+  public void testGet() throws InterruptedException {
+    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.put(1, "a", timeoutNanos);
+
+    assertEquals("a", map.get(1));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertNull(map.get(1));
+    assertEquals(0, map.getCache().size());
+    verify(mockIsItemValidFunc, times(1)).isValid(any());
+    verify(mockDisposalFunc, times(1)).dispose(any());
+  }
+
+  @Test
+  public void testGetWithDefault() throws InterruptedException {
+    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.put(1, "a", timeoutNanos);
+
+    assertEquals("a", map.get(1, "b", timeoutNanos));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertEquals("b", map.get(1, "b", timeoutNanos));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertNull(map.get(1));
+    assertEquals(0, map.getCache().size());
+    verify(mockIsItemValidFunc, times(1)).isValid(eq("a"));
+    verify(mockIsItemValidFunc, times(1)).isValid(eq("b"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("b"));
+  }
+
+  @Test
+  public void testGetWithExtendExpiration() throws InterruptedException {
+    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.put(1, "a", timeoutNanos);
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
+
+    map.getWithExtendExpiration(1, timeoutNanos);
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
+
+    assertEquals("a", map.get(1));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
+    assertNull(map.getWithExtendExpiration(1, timeoutNanos));
+    verify(mockIsItemValidFunc, times(1)).isValid(any());
+    verify(mockDisposalFunc, times(1)).dispose(any());
+  }
+
+  @Test
+  public void testPut() throws InterruptedException {
+    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+
+    map.put(1, "a", timeoutNanos);
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    map.put(2, "b", timeoutNanos);
+    map.put(2, "c", timeoutNanos);
+
+    assertNull(map.get(1));
+    assertEquals("c", map.get(2));
+    verify(mockIsItemValidFunc, times(1)).isValid(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+
verify(mockDisposalFunc, times(1)).dispose(eq("b")); + } + + @Test + public void testPutIfAbsent() throws InterruptedException { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + + map.putIfAbsent(1, "a", timeoutNanos); + map.putIfAbsent(1, "b", timeoutNanos); + + assertEquals("a", map.get(1)); + TimeUnit.NANOSECONDS.sleep(timeoutNanos); + map.putIfAbsent(1, "b", timeoutNanos); + assertEquals("b", map.get(1)); + verify(mockIsItemValidFunc, times(1)).isValid(eq("a")); + verify(mockDisposalFunc, times(1)).dispose(eq("a")); + } + + @Test + public void testComputeIfAbsent() throws InterruptedException { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + + map.computeIfAbsent(1, (key) -> "a", timeoutNanos); + assertEquals("a", map.get(1)); + map.computeIfAbsent(1, (key) -> "b", timeoutNanos); + assertEquals("a", map.get(1)); + TimeUnit.NANOSECONDS.sleep(timeoutNanos); + map.computeIfAbsent(1, (key) -> "b", timeoutNanos); + assertEquals("b", map.get(1)); + verify(mockIsItemValidFunc, times(1)).isValid(eq("a")); + verify(mockDisposalFunc, times(1)).dispose(eq("a")); + } + + @Test + public void testRemove() { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + map.put(1, "a", timeoutNanos); + + map.remove(1); + assertEquals(0, map.size()); + verify(mockDisposalFunc, times(1)).dispose(eq("a")); + } + + @Test + public void testClear() { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + map.put(1, "a", timeoutNanos); + map.put(2, "b", timeoutNanos); + + map.clear(); + assertEquals(0, map.size()); + verify(mockDisposalFunc, times(1)).dispose(eq("a")); + verify(mockDisposalFunc, times(1)).dispose(eq("b")); + } + + @Test + public void testGetEntries() throws InterruptedException { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + Map expectedEntries = new HashMap<>(); + expectedEntries.put(1, "a"); + expectedEntries.put(2, "b"); + map.put(1, "a", timeoutNanos); + map.put(2, "b", timeoutNanos); + TimeUnit.NANOSECONDS.sleep(timeoutNanos); + + Map entries = map.getEntries(); + assertEquals(expectedEntries, entries); + } + + @Test + public void testCleanup() throws InterruptedException { + final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); + final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); + map.setCleanupIntervalNanos(timeoutNanos * 2); + map.put(1, "a", timeoutNanos); + TimeUnit.NANOSECONDS.sleep(timeoutNanos); + map.put(2, "b", timeoutNanos); + + assertTrue(map.getCache().containsKey(1)); + assertTrue(map.getCache().get(1).isExpired()); + TimeUnit.NANOSECONDS.sleep(timeoutNanos); + map.put(2, "b", timeoutNanos); + verify(mockIsItemValidFunc, times(2)).isValid(eq("a")); + verify(mockDisposalFunc, times(1)).dispose(eq("a")); + assertNull(map.get(1)); + } +} From 55c4c333309e9475bda240de0b46880185ed55ba Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Thu, 18 May 2023 14:34:03 -0700 Subject: [PATCH 12/21] Fix unit tests for HikariPooledConnectionProvider --- .../HikariPooledConnectionProviderTest.java | 27 ++++++++----------- 1 file changed, 11 insertions(+), 16 deletions(-) diff --git 
a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index b3031a838..395c6eb2c 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -74,8 +74,9 @@ class HikariPooledConnectionProviderTest { private final HostSpec readerHost2Connections = new HostSpec(readerUrl2Connections, port, HostRole.READER); private final String protocol = "protocol://"; - private List testHosts = getTestHosts(); private final Properties defaultProps = getDefaultProps(); + private HikariPooledConnectionProvider provider; + private List testHosts = getTestHosts(); private AutoCloseable closeable; @@ -110,6 +111,9 @@ void init() throws SQLException { @AfterEach void tearDown() throws Exception { + if (provider != null) { + provider.releaseResources(); + } closeable.close(); } @@ -120,8 +124,7 @@ void testConnectWithDefaultMapping() throws SQLException { final Set expectedKeys = new HashSet<>( Collections.singletonList(new PoolKey("url", user1))); - final HikariPooledConnectionProvider provider = - spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); + provider = spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); doReturn(mockDataSource).when(provider).createHikariDataSource(any(), any(), any()); @@ -136,8 +139,6 @@ void testConnectWithDefaultMapping() throws SQLException { final Set keys = provider.getKeys(); assertEquals(expectedKeys, keys); } - - provider.releaseResources(); } @Test @@ -146,7 +147,7 @@ void testConnectWithCustomMapping() throws SQLException { final Set expectedKeys = new HashSet<>( Collections.singletonList(new PoolKey("url", "url+someUniqueKey"))); - final HikariPooledConnectionProvider provider = spy(new HikariPooledConnectionProvider( + provider = spy(new HikariPooledConnectionProvider( (hostSpec, properties) -> mockConfig, (hostSpec, properties) -> hostSpec.getUrl() + "+someUniqueKey")); @@ -161,15 +162,12 @@ void testConnectWithCustomMapping() throws SQLException { final Set keys = provider.getKeys(); assertEquals(expectedKeys, keys); } - - provider.releaseResources(); } @Test public void testAcceptsUrl() { final String clusterUrl = "my-database.cluster-XYZ.us-east-1.rds.amazonaws.com"; - final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); assertTrue( provider.acceptsUrl(protocol, new HostSpec(readerUrl2Connections), defaultProps)); @@ -179,8 +177,7 @@ public void testAcceptsUrl() { @Test public void testLeastConnectionsStrategy() throws SQLException { - final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); provider.setDatabasePools(getTestPoolMap()); assertThrows(UnsupportedOperationException.class, () -> @@ -200,8 +197,7 @@ private CacheMap getTestPoolMap() { @Test public void testConfigurePool() { - final HikariPooledConnectionProvider provider = - new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); + provider = new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig); final String expectedJdbcUrl = protocol + readerHost1Connection.getUrl() + db 
+ "?database=" + db; @@ -213,8 +209,7 @@ public void testConfigurePool() { @Test public void testConnectToDeletedInstance() throws SQLException { - final HikariPooledConnectionProvider provider = - spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); + provider = spy(new HikariPooledConnectionProvider((hostSpec, properties) -> mockConfig)); doReturn(mockDataSource).when(provider) .createHikariDataSource(eq(protocol), eq(readerHost1Connection), eq(defaultProps)); From 1fefe13aead3d6fdf17fc44ee8bd526a574868bd Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Thu, 18 May 2023 14:48:27 -0700 Subject: [PATCH 13/21] Refactor duplicate code in CacheMap --- .../software/amazon/jdbc/util/CacheMap.java | 79 ++++++------------- 1 file changed, 24 insertions(+), 55 deletions(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index 910ad7f38..2f5834a45 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -44,25 +44,15 @@ public CacheMap( } public V get(final K key) { + removeIfExpired(key); final CacheItem cacheItem = cache.get(key); - if (cacheItem == null || cacheItem.isExpired()) { - cache.remove(key); - if (cacheItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } - return null; - } else { - return cacheItem.item; - } + return cacheItem == null ? null : cacheItem.item; } public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { + removeIfExpired(key); CacheItem cacheItem = cache.get(key); - if (cacheItem == null || cacheItem.isExpired()) { - cache.remove(key); - if (cacheItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } + if (cacheItem == null) { cacheItem = new CacheItem(defaultItemValue, System.nanoTime() + itemExpirationNano); cache.put(key, cacheItem); } @@ -70,36 +60,34 @@ public V get(final K key, final V defaultItemValue, final long itemExpirationNan } public V getWithExtendExpiration(final K key, final long itemExpirationNano) { + removeIfExpired(key); + final CacheItem cacheItem = cache.get(key); + return cacheItem == null ? 
null : cacheItem.withExtendExpiration(itemExpirationNano).item; + } + + private void removeIfExpired(K key) { final CacheItem cacheItem = cache.get(key); if (cacheItem == null || cacheItem.isExpired()) { - cache.remove(key); - if (cacheItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } - return null; - } else { - return cacheItem.withExtendExpiration(itemExpirationNano).item; + removeAndDispose(key); + } + } + + private void removeAndDispose(K key) { + final CacheItem cacheItem = cache.remove(key); + if (cacheItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); } } public void put(final K key, final V item, final long itemExpirationNano) { cleanUp(); - final CacheItem oldItem = cache.get(key); - if (oldItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(oldItem.item); - } + removeAndDispose(key); cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); } public void putIfAbsent(final K key, final V item, final long itemExpirationNano) { cleanUp(); - final CacheItem currentItem = cache.get(key); - if (currentItem == null || currentItem.isExpired()) { - cache.remove(key); - if (currentItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(currentItem.item); - } - } + removeIfExpired(key); cache.putIfAbsent(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); } @@ -109,13 +97,7 @@ public V computeIfAbsent( final long itemExpirationNano) { cleanUp(); - final CacheItem currentItem = cache.get(key); - if (currentItem == null || currentItem.isExpired()) { - cache.remove(key); - if (currentItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(currentItem.item); - } - } + removeIfExpired(key); final CacheItem cacheItem = cache.computeIfAbsent( key, k -> new CacheItem( @@ -126,18 +108,12 @@ public V computeIfAbsent( public void remove(final K key) { cleanUp(); - CacheItem cacheItem = cache.remove(key); - if (itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } + removeAndDispose(key); } public void clear() { for (K key : cache.keySet()) { - CacheItem cacheItem = cache.remove(key); - if (itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } + removeAndDispose(key); } cache.clear(); } @@ -161,14 +137,7 @@ private void cleanUp() { this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); cache.forEach((key, value) -> { - if (value != null && !value.isExpired()) { - return; - } - - cache.remove(key); - if (value != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(value.item); - } + removeIfExpired(key); }); } From a0940cef0a84652163607ae1fcb656de046ecd1d Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Thu, 18 May 2023 17:26:41 -0700 Subject: [PATCH 14/21] SlidingExpirationMap wip --- .../jdbc/HikariPooledConnectionProvider.java | 14 +- .../software/amazon/jdbc/util/CacheMap.java | 125 ++--------- .../jdbc/util/SlidingExpirationMap.java | 177 +++++++++++++++ .../HikariPooledConnectionProviderTest.java | 17 +- .../amazon/jdbc/util/CacheMapTest.java | 206 ------------------ .../jdbc/util/SlidingExpirationMapTest.java | 124 +++++++++++ 6 files changed, 340 insertions(+), 323 deletions(-) create mode 100644 wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java delete mode 100644 wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java create mode 100644 wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java diff --git 
a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java index ae436a2d7..9526bc3cb 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java @@ -32,11 +32,11 @@ import org.checkerframework.checker.nullness.qual.NonNull; import software.amazon.jdbc.cleanup.CanReleaseResources; import software.amazon.jdbc.dialect.Dialect; -import software.amazon.jdbc.util.CacheMap; import software.amazon.jdbc.util.HikariCPSQLException; import software.amazon.jdbc.util.Messages; import software.amazon.jdbc.util.RdsUrlType; import software.amazon.jdbc.util.RdsUtils; +import software.amazon.jdbc.util.SlidingExpirationMap; import software.amazon.jdbc.util.StringUtils; public class HikariPooledConnectionProvider implements PooledConnectionProvider, @@ -48,10 +48,11 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider, private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections"; private static final RdsUtils rdsUtils = new RdsUtils(); - private static CacheMap databasePools = new CacheMap<>( - (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0, - (hikariDataSource) -> hikariDataSource.close() - ); + private static SlidingExpirationMap databasePools = + new SlidingExpirationMap<>( + (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0, + HikariDataSource::close + ); private static long poolExpirationCheckNanos = TimeUnit.MINUTES.toNanos(30); private final HikariPoolConfigurator poolConfigurator; private final HikariPoolMapping poolMapping; @@ -337,8 +338,7 @@ public String toString() { } // For testing purposes only - void setDatabasePools(CacheMap connectionPools) { + void setDatabasePools(SlidingExpirationMap connectionPools) { databasePools = connectionPools; } - } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java index 2f5834a45..eefe85567 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/CacheMap.java @@ -21,106 +21,51 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; -import java.util.function.Function; public class CacheMap { - private final Map cache = new ConcurrentHashMap<>(); - private long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); + private final Map> cache = new ConcurrentHashMap<>(); + private final long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); private final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos); - private final IsItemValidFunc isItemValidFunc; - private final ItemDisposalFunc itemDisposalFunc; public CacheMap() { - this.isItemValidFunc = null; - this.itemDisposalFunc = null; - } - - public CacheMap( - final IsItemValidFunc isItemValidFunc, - final ItemDisposalFunc itemDisposalFunc) { - this.isItemValidFunc = isItemValidFunc; - this.itemDisposalFunc = itemDisposalFunc; } public V get(final K key) { - removeIfExpired(key); - final CacheItem cacheItem = cache.get(key); + final CacheItem cacheItem = cache.computeIfPresent(key, (kk, vv) -> vv.isExpired() ? null : vv); return cacheItem == null ? 
null : cacheItem.item; } public V get(final K key, final V defaultItemValue, final long itemExpirationNano) { - removeIfExpired(key); - CacheItem cacheItem = cache.get(key); - if (cacheItem == null) { - cacheItem = new CacheItem(defaultItemValue, System.nanoTime() + itemExpirationNano); - cache.put(key, cacheItem); - } + final CacheItem cacheItem = cache.compute(key, + (kk, vv) -> (vv == null || vv.isExpired()) + ? new CacheItem<>(defaultItemValue, System.nanoTime() + itemExpirationNano) + : vv); return cacheItem.item; } - public V getWithExtendExpiration(final K key, final long itemExpirationNano) { - removeIfExpired(key); - final CacheItem cacheItem = cache.get(key); - return cacheItem == null ? null : cacheItem.withExtendExpiration(itemExpirationNano).item; - } - - private void removeIfExpired(K key) { - final CacheItem cacheItem = cache.get(key); - if (cacheItem == null || cacheItem.isExpired()) { - removeAndDispose(key); - } - } - - private void removeAndDispose(K key) { - final CacheItem cacheItem = cache.remove(key); - if (cacheItem != null && itemDisposalFunc != null) { - itemDisposalFunc.dispose(cacheItem.item); - } - } - public void put(final K key, final V item, final long itemExpirationNano) { + cache.put(key, new CacheItem<>(item, System.nanoTime() + itemExpirationNano)); cleanUp(); - removeAndDispose(key); - cache.put(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); } public void putIfAbsent(final K key, final V item, final long itemExpirationNano) { + cache.putIfAbsent(key, new CacheItem<>(item, System.nanoTime() + itemExpirationNano)); cleanUp(); - removeIfExpired(key); - cache.putIfAbsent(key, new CacheItem(item, System.nanoTime() + itemExpirationNano)); - } - - public V computeIfAbsent( - final K key, - Function mappingFunction, - final long itemExpirationNano) { - - cleanUp(); - removeIfExpired(key); - final CacheItem cacheItem = cache.computeIfAbsent( - key, - k -> new CacheItem( - mappingFunction.apply(k), - System.nanoTime() + itemExpirationNano)); - return cacheItem.withExtendExpiration(itemExpirationNano).item; } public void remove(final K key) { + cache.remove(key); cleanUp(); - removeAndDispose(key); } public void clear() { - for (K key : cache.keySet()) { - removeAndDispose(key); - } cache.clear(); } public Map getEntries() { final Map entries = new HashMap<>(); - for (final Map.Entry entry : this.cache.entrySet()) { + for (final Map.Entry> entry : this.cache.entrySet()) { entries.put(entry.getKey(), entry.getValue().item); } return entries; @@ -131,27 +76,19 @@ public int size() { } private void cleanUp() { - if (this.cleanupTimeNanos.get() > System.nanoTime()) { - return; + if (this.cleanupTimeNanos.get() < System.nanoTime()) { + this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); + cache.forEach((key, value) -> { + if (value == null || value.isExpired()) { + cache.remove(key); + } + }); } - - this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); - cache.forEach((key, value) -> { - removeIfExpired(key); - }); - } - - public interface IsItemValidFunc { - boolean isValid(V item); - } - - public interface ItemDisposalFunc { - void dispose(V item); } - class CacheItem { - private final V item; - private long expirationTime; + private static class CacheItem { + final V item; + final long expirationTime; public CacheItem(final V item, final long expirationTime) { this.item = item; @@ -159,17 +96,9 @@ public CacheItem(final V item, final long expirationTime) { } boolean isExpired() { - if (isItemValidFunc != null) { - 
return System.nanoTime() > expirationTime && !isItemValidFunc.isValid(this.item); - } return System.nanoTime() > expirationTime; } - public CacheItem withExtendExpiration(final long itemExpirationNano) { - this.expirationTime = System.nanoTime() + itemExpirationNano; - return this; - } - @Override public int hashCode() { final int prime = 31; @@ -189,7 +118,7 @@ public boolean equals(final Object obj) { if (getClass() != obj.getClass()) { return false; } - final CacheItem other = (CacheItem) obj; + final CacheItem other = (CacheItem) obj; if (item == null) { return other.item == null; } else { @@ -202,14 +131,4 @@ public String toString() { return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]"; } } - - // For testing purposes - void setCleanupIntervalNanos(long cleanupIntervalNanos) { - this.cleanupIntervalNanos = cleanupIntervalNanos; - this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); - } - - Map getCache() { - return cache; - } } diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java new file mode 100644 index 000000000..bd525a03b --- /dev/null +++ b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java @@ -0,0 +1,177 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"). + * You may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package software.amazon.jdbc.util; + +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Function; + +public class SlidingExpirationMap { + private final Map cache = new ConcurrentHashMap<>(); + private long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10); + private final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos); + private final ShouldDisposeFunc shouldDisposeFunc; + private final ItemDisposalFunc itemDisposalFunc; + + public SlidingExpirationMap() { + this.shouldDisposeFunc = null; + this.itemDisposalFunc = null; + } + + public SlidingExpirationMap( + final ShouldDisposeFunc shouldDisposeFunc, + final ItemDisposalFunc itemDisposalFunc) { + this.shouldDisposeFunc = shouldDisposeFunc; + this.itemDisposalFunc = itemDisposalFunc; + } + + public V computeIfAbsent( + final K key, + Function mappingFunction, + final long itemExpirationNano) { + + cleanUp(); + final CacheItem cacheItem = cache.computeIfAbsent( + key, + k -> new CacheItem( + mappingFunction.apply(k), + System.nanoTime() + itemExpirationNano)); + return cacheItem.withExtendExpiration(itemExpirationNano).item; + } + + public void remove(final K key) { + cleanUp(); + removeAndDispose(key); + } + + private void removeAndDispose(K key) { + final CacheItem cacheItem = cache.remove(key); + if (cacheItem != null && itemDisposalFunc != null) { + itemDisposalFunc.dispose(cacheItem.item); + } + } + + private void removeIfExpired(K key) { + final CacheItem cacheItem = cache.get(key); + if (cacheItem == null || cacheItem.isExpired()) { + removeAndDispose(key); + } + } + + public void clear() { + for (K key : cache.keySet()) { + removeAndDispose(key); + } + cache.clear(); + } + + public Map getEntries() { + final Map entries = new HashMap<>(); + for (final Map.Entry entry : this.cache.entrySet()) { + entries.put(entry.getKey(), entry.getValue().item); + } + return entries; + } + + public int size() { + return this.cache.size(); + } + + private void cleanUp() { + if (this.cleanupTimeNanos.get() > System.nanoTime()) { + return; + } + + this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); + cache.forEach((key, value) -> removeIfExpired(key)); + } + + public interface ShouldDisposeFunc { + boolean isValid(V item); + } + + public interface ItemDisposalFunc { + void dispose(V item); + } + + // For testing purposes + void setCleanupIntervalNanos(long cleanupIntervalNanos) { + this.cleanupIntervalNanos = cleanupIntervalNanos; + this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos); + } + + Map getCache() { + return cache; + } + + class CacheItem { + private final V item; + private long expirationTime; + + public CacheItem(final V item, final long expirationTime) { + this.item = item; + this.expirationTime = expirationTime; + } + + boolean isExpired() { + if (shouldDisposeFunc != null) { + return System.nanoTime() > expirationTime && !shouldDisposeFunc.isValid(this.item); + } + return System.nanoTime() > expirationTime; + } + + public CacheItem withExtendExpiration(final long itemExpirationNano) { + this.expirationTime = System.nanoTime() + itemExpirationNano; + return this; + } + + @Override + public int hashCode() { + final int prime = 31; + int result = 1; + result = prime * result + ((item == null) ? 
0 : item.hashCode()); + return result; + } + + @Override + public boolean equals(final Object obj) { + if (this == obj) { + return true; + } + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + final CacheItem other = (CacheItem) obj; + if (item == null) { + return other.item == null; + } else { + return item.equals(other.item); + } + } + + @Override + public String toString() { + return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]"; + } + } +} diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java index 395c6eb2c..bc59562bc 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java @@ -46,7 +46,7 @@ import org.mockito.MockitoAnnotations; import software.amazon.jdbc.HikariPooledConnectionProvider.PoolKey; import software.amazon.jdbc.dialect.Dialect; -import software.amazon.jdbc.util.CacheMap; +import software.amazon.jdbc.util.SlidingExpirationMap; class HikariPooledConnectionProviderTest { @Mock Connection mockConnection; @@ -75,8 +75,8 @@ class HikariPooledConnectionProviderTest { private final String protocol = "protocol://"; private final Properties defaultProps = getDefaultProps(); + private final List testHosts = getTestHosts(); private HikariPooledConnectionProvider provider; - private List testHosts = getTestHosts(); private AutoCloseable closeable; @@ -187,11 +187,14 @@ public void testLeastConnectionsStrategy() throws SQLException { assertEquals(readerUrl1Connection, selectedHost.getHost()); } - private CacheMap getTestPoolMap() { - CacheMap map = new CacheMap<>(); - map.put(new PoolKey(readerHost2Connections.getUrl(), user1), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); - map.put(new PoolKey(readerHost2Connections.getUrl(), user2), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); - map.put(new PoolKey(readerHost1Connection.getUrl(), user1), dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + private SlidingExpirationMap getTestPoolMap() { + SlidingExpirationMap map = new SlidingExpirationMap<>(); + map.computeIfAbsent(new PoolKey(readerHost2Connections.getUrl(), user1), + (key) -> dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + map.computeIfAbsent(new PoolKey(readerHost2Connections.getUrl(), user2), + (key) -> dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); + map.computeIfAbsent(new PoolKey(readerHost1Connection.getUrl(), user1), + (key) -> dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); return map; } diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java deleted file mode 100644 index a60c57cde..000000000 --- a/wrapper/src/test/java/software/amazon/jdbc/util/CacheMapTest.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"). - * You may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package software.amazon.jdbc.util;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.TimeUnit;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-class CacheMapTest {
-
-  @Mock CacheMap.ItemDisposalFunc<String> mockDisposalFunc;
-  @Mock CacheMap.IsItemValidFunc<String> mockIsItemValidFunc;
-  private AutoCloseable closeable;
-
-  @BeforeEach
-  void init() {
-    closeable = MockitoAnnotations.openMocks(this);
-  }
-
-  @AfterEach
-  void tearDown() throws Exception {
-    closeable.close();
-  }
-
-  @Test
-  public void testGet() throws InterruptedException {
-    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
-    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
-    map.put(1, "a", timeoutNanos);
-
-    assertEquals("a", map.get(1));
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
-    assertNull(map.get(1));
-    assertEquals(0, map.getCache().size());
-    verify(mockIsItemValidFunc, times(1)).isValid(any());
-    verify(mockDisposalFunc, times(1)).dispose(any());
-  }
-
-  @Test
-  public void testGetWithDefault() throws InterruptedException {
-    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
-    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
-    map.put(1, "a", timeoutNanos);
-
-    assertEquals("a", map.get(1, "b", timeoutNanos));
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
-    assertEquals("b", map.get(1, "b", timeoutNanos));
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
-    assertNull(map.get(1));
-    assertEquals(0, map.getCache().size());
-    verify(mockIsItemValidFunc, times(1)).isValid(eq("a"));
-    verify(mockIsItemValidFunc, times(1)).isValid(eq("b"));
-    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
-    verify(mockDisposalFunc, times(1)).dispose(eq("b"));
-  }
-
-  @Test
-  public void testGetWithExtendExpiration() throws InterruptedException {
-    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
-    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
-    map.put(1, "a", timeoutNanos);
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
-
-    map.getWithExtendExpiration(1, timeoutNanos);
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
-
-    assertEquals("a", map.get(1));
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
-    assertNull(map.getWithExtendExpiration(1, timeoutNanos));
-    verify(mockIsItemValidFunc, times(1)).isValid(any());
-    verify(mockDisposalFunc, times(1)).dispose(any());
-  }
-
-  @Test
-  public void testPut() throws InterruptedException {
-    final CacheMap<Integer, String> map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc);
-    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
-
-    map.put(1, "a", timeoutNanos);
-    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
-    map.put(2, "b", timeoutNanos);
-    map.put(2, "c", timeoutNanos);
-
-    assertNull(map.get(1));
-    assertEquals("c", map.get(2));
-    verify(mockIsItemValidFunc, times(1)).isValid(eq("a"));
-    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
-
verify(mockDisposalFunc, times(1)).dispose(eq("b")); - } - - @Test - public void testPutIfAbsent() throws InterruptedException { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - - map.putIfAbsent(1, "a", timeoutNanos); - map.putIfAbsent(1, "b", timeoutNanos); - - assertEquals("a", map.get(1)); - TimeUnit.NANOSECONDS.sleep(timeoutNanos); - map.putIfAbsent(1, "b", timeoutNanos); - assertEquals("b", map.get(1)); - verify(mockIsItemValidFunc, times(1)).isValid(eq("a")); - verify(mockDisposalFunc, times(1)).dispose(eq("a")); - } - - @Test - public void testComputeIfAbsent() throws InterruptedException { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - - map.computeIfAbsent(1, (key) -> "a", timeoutNanos); - assertEquals("a", map.get(1)); - map.computeIfAbsent(1, (key) -> "b", timeoutNanos); - assertEquals("a", map.get(1)); - TimeUnit.NANOSECONDS.sleep(timeoutNanos); - map.computeIfAbsent(1, (key) -> "b", timeoutNanos); - assertEquals("b", map.get(1)); - verify(mockIsItemValidFunc, times(1)).isValid(eq("a")); - verify(mockDisposalFunc, times(1)).dispose(eq("a")); - } - - @Test - public void testRemove() { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - map.put(1, "a", timeoutNanos); - - map.remove(1); - assertEquals(0, map.size()); - verify(mockDisposalFunc, times(1)).dispose(eq("a")); - } - - @Test - public void testClear() { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - map.put(1, "a", timeoutNanos); - map.put(2, "b", timeoutNanos); - - map.clear(); - assertEquals(0, map.size()); - verify(mockDisposalFunc, times(1)).dispose(eq("a")); - verify(mockDisposalFunc, times(1)).dispose(eq("b")); - } - - @Test - public void testGetEntries() throws InterruptedException { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - Map expectedEntries = new HashMap<>(); - expectedEntries.put(1, "a"); - expectedEntries.put(2, "b"); - map.put(1, "a", timeoutNanos); - map.put(2, "b", timeoutNanos); - TimeUnit.NANOSECONDS.sleep(timeoutNanos); - - Map entries = map.getEntries(); - assertEquals(expectedEntries, entries); - } - - @Test - public void testCleanup() throws InterruptedException { - final CacheMap map = new CacheMap<>(mockIsItemValidFunc, mockDisposalFunc); - final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); - map.setCleanupIntervalNanos(timeoutNanos * 2); - map.put(1, "a", timeoutNanos); - TimeUnit.NANOSECONDS.sleep(timeoutNanos); - map.put(2, "b", timeoutNanos); - - assertTrue(map.getCache().containsKey(1)); - assertTrue(map.getCache().get(1).isExpired()); - TimeUnit.NANOSECONDS.sleep(timeoutNanos); - map.put(2, "b", timeoutNanos); - verify(mockIsItemValidFunc, times(2)).isValid(eq("a")); - verify(mockDisposalFunc, times(1)).dispose(eq("a")); - assertNull(map.get(1)); - } -} diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java new file mode 100644 index 000000000..87d348be2 --- /dev/null +++ b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java @@ -0,0 +1,124 @@ +/* + * Copyright Amazon.com, Inc. or its affiliates. 
All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package software.amazon.jdbc.util;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mock;
+import org.mockito.MockitoAnnotations;
+
+public class SlidingExpirationMapTest {
+  @Mock SlidingExpirationMap.ItemDisposalFunc<String> mockDisposalFunc;
+  @Mock SlidingExpirationMap.ShouldDisposeFunc<String> mockIsItemValidFunc;
+  private AutoCloseable closeable;
+
+  @BeforeEach
+  void init() {
+    closeable = MockitoAnnotations.openMocks(this);
+  }
+
+  @AfterEach
+  void tearDown() throws Exception {
+    closeable.close();
+  }
+
+  @Test
+  public void testComputeIfAbsent() throws InterruptedException {
+    final SlidingExpirationMap<Integer, String> map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.setCleanupIntervalNanos(timeoutNanos * 2);
+
+    map.computeIfAbsent(1, (key) -> "a", timeoutNanos);
+    assertEquals("a", map.computeIfAbsent(1, (key) -> "b", timeoutNanos));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertEquals("a", map.computeIfAbsent(1, (key) -> "b", timeoutNanos));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos / 2);
+    assertEquals("a", map.computeIfAbsent(1, (key) -> "b", timeoutNanos));
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertEquals("b", map.computeIfAbsent(1, (key) -> "b", timeoutNanos));
+    verify(mockIsItemValidFunc, times(1)).isValid(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+  }
+
+  @Test
+  public void testRemove() {
+    final SlidingExpirationMap<Integer, String> map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.computeIfAbsent(1, (key) -> "a", timeoutNanos);
+
+    map.remove(1);
+    assertEquals(0, map.size());
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+  }
+
+  @Test
+  public void testClear() {
+    final SlidingExpirationMap<Integer, String> map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.computeIfAbsent(1, (key) -> "a", timeoutNanos);
+    map.computeIfAbsent(2, (key) -> "b", timeoutNanos);
+
+    map.clear();
+    assertEquals(0, map.size());
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("b"));
+  }
+
+  @Test
+  public void testGetEntries() throws InterruptedException {
+    final SlidingExpirationMap<Integer, String> map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    Map<Integer, String> expectedEntries = new HashMap<>();
+    expectedEntries.put(1, "a");
+    expectedEntries.put(2, "b");
+    map.computeIfAbsent(1, (key) -> "a", timeoutNanos);
+    map.computeIfAbsent(2, (key) -> "b", timeoutNanos);
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+
+    Map<Integer, String> entries = map.getEntries();
+    assertEquals(expectedEntries, entries);
+  }
+
+  @Test
+  public void testCleanup() throws InterruptedException {
+    final SlidingExpirationMap<Integer, String> map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc);
+    final long timeoutNanos = TimeUnit.SECONDS.toNanos(1);
+    map.setCleanupIntervalNanos(timeoutNanos * 2);
+    map.computeIfAbsent(1, (key) -> "a", timeoutNanos);
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    map.computeIfAbsent(2, (key) -> "b", timeoutNanos);
+
+    assertTrue(map.getCache().containsKey(1));
+    assertTrue(map.getCache().get(1).isExpired());
+    TimeUnit.NANOSECONDS.sleep(timeoutNanos);
+    assertEquals("c", map.computeIfAbsent(2, (key) -> "c", timeoutNanos));
+    verify(mockIsItemValidFunc, times(2)).isValid(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("a"));
+    verify(mockDisposalFunc, times(1)).dispose(eq("b"));
+    assertEquals("d", map.computeIfAbsent(1, (key) -> "d", timeoutNanos));
+  }
+}

From e569c8e9bad6d93907db02400d29a61c2d63d21d Mon Sep 17 00:00:00 2001
From: Aaron Congo
Date: Fri, 19 May 2023 15:21:10 -0700
Subject: [PATCH 15/21] Change HikariPooledConnectionProvider constructors, add
 SlidingExpirationMap javadocs

---
 .../jdbc/HikariPooledConnectionProvider.java  |  46 +++++--
 ...onMap.java => SlidingExpirationCache.java} | 119 +++++++++++++++---
 .../container/tests/AutoscalingTests.java     |   6 +-
 .../HikariPooledConnectionProviderTest.java   |   6 +-
 ...t.java => SlidingExpirationCacheTest.java} |  23 ++--
 5 files changed, 161 insertions(+), 39 deletions(-)
 rename wrapper/src/main/java/software/amazon/jdbc/util/{SlidingExpirationMap.java => SlidingExpirationCache.java} (50%)
 rename wrapper/src/test/java/software/amazon/jdbc/util/{SlidingExpirationMapTest.java => SlidingExpirationCacheTest.java} (81%)

diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
index 9526bc3cb..9373728b2 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
@@ -36,7 +36,7 @@ import software.amazon.jdbc.util.Messages;
 import software.amazon.jdbc.util.RdsUrlType;
 import software.amazon.jdbc.util.RdsUtils;
-import software.amazon.jdbc.util.SlidingExpirationMap;
+import software.amazon.jdbc.util.SlidingExpirationCache;
 import software.amazon.jdbc.util.StringUtils;

 public class HikariPooledConnectionProvider implements PooledConnectionProvider,
@@ -48,8 +48,8 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider,
   private static final String LEAST_CONNECTIONS_STRATEGY = "leastConnections";
   private static final RdsUtils rdsUtils = new RdsUtils();
-  private static SlidingExpirationMap<PoolKey, HikariDataSource> databasePools =
-      new SlidingExpirationMap<>(
+  private static SlidingExpirationCache<PoolKey, HikariDataSource> databasePools =
+      new SlidingExpirationCache<>(
           (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0,
           HikariDataSource::close
       );
@@ -76,10 +76,28 @@ public HikariPooledConnectionProvider(HikariPoolConfigurator hikariPoolConfigurator) {
     this(hikariPoolConfigurator, null);
   }

+  /**
+   * {@link HikariPooledConnectionProvider} constructor.
This class can be passed to + * {@link ConnectionProviderManager#setConnectionProvider} to enable internal connection pools for + * each database instance in a cluster. By maintaining internal connection pools, the driver can + * improve performance by reusing old {@link Connection} objects. + * + * @param hikariPoolConfigurator a function that returns a {@link HikariConfig} with specific + * Hikari configurations. By default, the + * {@link HikariPooledConnectionProvider} will configure the + * jdbcUrl, exceptionOverrideClassName, username, and password. Any + * additional configuration should be defined by passing in this + * parameter. If no additional configuration is desired, pass in a + * {@link HikariPoolConfigurator} that returns an empty + * HikariConfig. + * @param mapping a function that returns a String key used for the internal + * connection pool keys. An internal connection pool will be + * generated for each unique key returned by this function. + */ public HikariPooledConnectionProvider( - HikariPoolConfigurator hikariPoolConfigurator, long expirationCheckNanos) { - this(hikariPoolConfigurator, null); - poolExpirationCheckNanos = expirationCheckNanos; + HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping mapping) { + this.poolConfigurator = hikariPoolConfigurator; + this.poolMapping = mapping; } /** @@ -99,11 +117,23 @@ public HikariPooledConnectionProvider( * @param mapping a function that returns a String key used for the internal * connection pool keys. An internal connection pool will be * generated for each unique key returned by this function. + * @param poolExpirationNanos the amount of time that a pool should sit in the cache before + * being marked as expired for cleanup, in nanoseconds. Expired + * pools can still be used and will not be closed unless there + * are no active connections. + * @param poolCleanupNanos the interval defining how often expired connection pools + * should be cleaned up, in nanoseconds. Note that expired pools + * will not be closed unless there are no active connections. 
*/
   public HikariPooledConnectionProvider(
-      HikariPoolConfigurator hikariPoolConfigurator, HikariPoolMapping mapping) {
+      HikariPoolConfigurator hikariPoolConfigurator,
+      HikariPoolMapping mapping,
+      long poolExpirationNanos,
+      long poolCleanupNanos) {
     this.poolConfigurator = hikariPoolConfigurator;
     this.poolMapping = mapping;
+    poolExpirationCheckNanos = poolExpirationNanos;
+    databasePools.setCleanupIntervalNanos(poolCleanupNanos);
   }

   @Override
@@ -338,7 +368,7 @@ public String toString() {
   }

   // For testing purposes only
-  void setDatabasePools(SlidingExpirationMap<PoolKey, HikariDataSource> connectionPools) {
+  void setDatabasePools(SlidingExpirationCache<PoolKey, HikariDataSource> connectionPools) {
     databasePools = connectionPools;
   }
 }

diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
similarity index 50%
rename from wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java
rename to wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
index bd525a03b..fa5767a9b 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationMap.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
@@ -23,25 +23,52 @@
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.function.Function;

-public class SlidingExpirationMap<K, V> {
+public class SlidingExpirationCache<K, V> {
   private final Map<K, CacheItem> cache = new ConcurrentHashMap<>();
   private long cleanupIntervalNanos = TimeUnit.MINUTES.toNanos(10);
   private final AtomicLong cleanupTimeNanos = new AtomicLong(System.nanoTime() + cleanupIntervalNanos);
   private final ShouldDisposeFunc<V> shouldDisposeFunc;
   private final ItemDisposalFunc<V> itemDisposalFunc;

-  public SlidingExpirationMap() {
+  /**
+   * A cache that periodically cleans up expired entries. Fetching an expired entry marks that entry
+   * as not-expired and renews its expiration time.
+   */
+  public SlidingExpirationCache() {
     this.shouldDisposeFunc = null;
     this.itemDisposalFunc = null;
   }

-  public SlidingExpirationMap(
+  /**
+   * A cache that periodically cleans up expired entries. Fetching an expired entry marks that entry
+   * as not-expired and renews its expiration time.
+   *
+   * @param shouldDisposeFunc a function defining the conditions under which an expired entry should
+   *                          be cleaned up when we hit the cleanup time
+   * @param itemDisposalFunc  a function that will be called on any item that meets the cleanup
+   *                          criteria at cleanup time. The criteria for cleanup are that the item
+   *                          is both expired and marked for cleanup via a call to
+   *                          shouldDisposeFunc.
+   */
+  public SlidingExpirationCache(
       final ShouldDisposeFunc<V> shouldDisposeFunc,
       final ItemDisposalFunc<V> itemDisposalFunc) {
     this.shouldDisposeFunc = shouldDisposeFunc;
     this.itemDisposalFunc = itemDisposalFunc;
   }

+  /**
+   * In addition to performing the logic defined by {@link Map#computeIfAbsent}, cleans up expired
+   * entries if we have hit cleanup time. If an expired entry is requested and either the cleanup
+   * time has not been reached or {@link ShouldDisposeFunc} indicates the entry should not be
+   * disposed, the entry will be marked as non-expired.
+   *
+   * @param key                the key with which the specified value is to be associated
+   * @param mappingFunction    the function to compute a value
+   * @param itemExpirationNano the expiration time of the new or renewed entry
+   * @return the current (existing or computed) value associated with the specified key, or null if
+   *         the computed value is null
+   */
   public V computeIfAbsent(
       final K key,
       Function<K, V> mappingFunction,
       final long itemExpirationNano) {
@@ -56,6 +83,12 @@ public V computeIfAbsent(
     return cacheItem.withExtendExpiration(itemExpirationNano).item;
   }

+  /**
+   * Clean up expired entries if we have hit the cleanup time, then remove and dispose of the value
+   * associated with the given key.
+   *
+   * @param key the key associated with the value to be removed/disposed
+   */
   public void remove(final K key) {
     cleanUp();
     removeAndDispose(key);
   }
@@ -70,11 +103,14 @@ private void removeAndDispose(K key) {
   private void removeIfExpired(K key) {
     final CacheItem cacheItem = cache.get(key);
-    if (cacheItem == null || cacheItem.isExpired()) {
+    if (cacheItem == null || cacheItem.shouldCleanup()) {
       removeAndDispose(key);
     }
   }

+  /**
+   * Remove and dispose of all entries in the cache.
+   */
   public void clear() {
     for (K key : cache.keySet()) {
       removeAndDispose(key);
     }
     cache.clear();
   }

+  /**
+   * Get a map copy of all entries in the cache, including expired entries.
+   *
+   * @return a map copy of all entries in the cache, including expired entries
+   */
   public Map<K, V> getEntries() {
     final Map<K, V> entries = new HashMap<>();
     for (final Map.Entry<K, CacheItem> entry : this.cache.entrySet()) {
@@ -90,6 +131,11 @@ public Map<K, V> getEntries() {
     }
     return entries;
   }

+  /**
+   * Get the current size of the cache, including expired entries.
+   *
+   * @return the current size of the cache, including expired entries.
+   */
   public int size() {
     return this.cache.size();
   }
@@ -103,42 +149,79 @@
   private void cleanUp() {
     cache.forEach((key, value) -> removeIfExpired(key));
   }

+  /**
+   * Set the cleanup interval for the cache. At cleanup time, expired entries marked for cleanup via
+   * {@link ShouldDisposeFunc} (if defined) are disposed.
+   *
+   * @param cleanupIntervalNanos
+   */
+  public void setCleanupIntervalNanos(long cleanupIntervalNanos) {
+    this.cleanupIntervalNanos = cleanupIntervalNanos;
+    this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos);
+  }
+
+  /**
+   * An optional function defining the conditions under which an expired entry should be cleaned up
+   * at cleanup time.
+   *
+   * @param <V> the type of object being analyzed for disposal
+   */
   public interface ShouldDisposeFunc<V> {
     boolean isValid(V item);
   }

+  /**
+   * An optional function defining extra cleanup steps to take when a cache item is cleaned up.
+   *
+   * @param <V> the type of object being disposed
+   */
   public interface ItemDisposalFunc<V> {
     void dispose(V item);
   }

-  // For testing purposes
-  void setCleanupIntervalNanos(long cleanupIntervalNanos) {
-    this.cleanupIntervalNanos = cleanupIntervalNanos;
-    this.cleanupTimeNanos.set(System.nanoTime() + cleanupIntervalNanos);
-  }
-
+  // For testing purposes only
   Map<K, CacheItem> getCache() {
     return cache;
   }

   class CacheItem {
     private final V item;
-    private long expirationTime;
+    private long expirationTimeNano;

+    /**
+     * CacheItem constructor.
+     *
+     * @param item               the item value
+     * @param expirationTimeNano the time at which the CacheItem should be considered expired, in
+     *                           nanoseconds (as reported by {@link System#nanoTime()})
+     */
+    public CacheItem(final V item, final long expirationTimeNano) {
      this.item = item;
-      this.expirationTime = expirationTime;
+      this.expirationTimeNano = expirationTimeNano;
    }

-    boolean isExpired() {
+    /**
+     * Determines if a cache item should be cleaned up. An item should be cleaned up if it has
+     * passed its expiration time and {@link ShouldDisposeFunc} (if defined) indicates that it
+     * should be cleaned up.
+     *
+     * @return true if the cache item should be cleaned up at cleanup time. Otherwise, returns
+     *         false.
+     */
+    boolean shouldCleanup() {
       if (shouldDisposeFunc != null) {
-        return System.nanoTime() > expirationTime && !shouldDisposeFunc.isValid(this.item);
+        return System.nanoTime() > expirationTimeNano && !shouldDisposeFunc.isValid(this.item);
       }
-      return System.nanoTime() > expirationTime;
+      return System.nanoTime() > expirationTimeNano;
     }

+    /**
+     * Renew a cache item's expiration time and return the cache item.
+     *
+     * @param itemExpirationNano the new expiration duration for the item
+     * @return this cache item, with its expiration time renewed
+     */
     public CacheItem withExtendExpiration(final long itemExpirationNano) {
-      this.expirationTime = System.nanoTime() + itemExpirationNano;
+      this.expirationTimeNano = System.nanoTime() + itemExpirationNano;
       return this;
     }
@@ -171,7 +254,7 @@ public boolean equals(final Object obj) {
     @Override
     public String toString() {
-      return "CacheItem [item=" + item + ", expirationTime=" + expirationTime + "]";
+      return "CacheItem [item=" + item + ", expirationTime=" + expirationTimeNano + "]";
     }
   }
 }

diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java
index e842affd3..aeeaf87ff 100644
--- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java
+++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java
@@ -120,7 +120,11 @@ public void test_pooledConnectionAutoScaling_setReadOnlyOnOldConnection()
     final int originalClusterSize = instances.size();
     final long poolExpirationNanos = TimeUnit.MINUTES.toNanos(3);
     final HikariPooledConnectionProvider provider =
-        new HikariPooledConnectionProvider(getHikariConfig(instances.size()), poolExpirationNanos);
+        new HikariPooledConnectionProvider(
+            getHikariConfig(instances.size()),
+            null,
+            poolExpirationNanos,
+            TimeUnit.MINUTES.toNanos(10));
     ConnectionProviderManager.setConnectionProvider(provider);
     final List<Connection> connections = new ArrayList<>();

diff --git a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java
index bc59562bc..6acd25e70 100644
--- a/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java
+++ b/wrapper/src/test/java/software/amazon/jdbc/HikariPooledConnectionProviderTest.java
@@ -46,7 +46,7 @@ import org.mockito.MockitoAnnotations;
 import software.amazon.jdbc.HikariPooledConnectionProvider.PoolKey;
 import software.amazon.jdbc.dialect.Dialect;
-import software.amazon.jdbc.util.SlidingExpirationMap;
+import software.amazon.jdbc.util.SlidingExpirationCache;

 class HikariPooledConnectionProviderTest {
   @Mock Connection mockConnection;
@@ -187,8 +187,8 @@ public void testLeastConnectionsStrategy() throws SQLException {
     assertEquals(readerUrl1Connection, selectedHost.getHost());
   }

-  private SlidingExpirationMap<PoolKey, HikariDataSource> getTestPoolMap() {
-    SlidingExpirationMap<PoolKey, HikariDataSource> map = new SlidingExpirationMap<>();
+  private SlidingExpirationCache<PoolKey, HikariDataSource>
getTestPoolMap() { + SlidingExpirationCache map = new SlidingExpirationCache<>(); map.computeIfAbsent(new PoolKey(readerHost2Connections.getUrl(), user1), (key) -> dsWith1Connection, TimeUnit.MINUTES.toNanos(10)); map.computeIfAbsent(new PoolKey(readerHost2Connections.getUrl(), user2), diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java similarity index 81% rename from wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java rename to wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java index 87d348be2..944d760ca 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationMapTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java @@ -31,9 +31,9 @@ import org.mockito.Mock; import org.mockito.MockitoAnnotations; -public class SlidingExpirationMapTest { - @Mock SlidingExpirationMap.ItemDisposalFunc mockDisposalFunc; - @Mock SlidingExpirationMap.ShouldDisposeFunc mockIsItemValidFunc; +public class SlidingExpirationCacheTest { + @Mock SlidingExpirationCache.ItemDisposalFunc mockDisposalFunc; + @Mock SlidingExpirationCache.ShouldDisposeFunc mockIsItemValidFunc; private AutoCloseable closeable; @BeforeEach @@ -48,7 +48,8 @@ void tearDown() throws Exception { @Test public void testComputeIfAbsent() throws InterruptedException { - final SlidingExpirationMap map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc); + final SlidingExpirationCache + map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.setCleanupIntervalNanos(timeoutNanos * 2); @@ -66,7 +67,8 @@ public void testComputeIfAbsent() throws InterruptedException { @Test public void testRemove() { - final SlidingExpirationMap map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc); + final SlidingExpirationCache + map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); @@ -77,7 +79,8 @@ public void testRemove() { @Test public void testClear() { - final SlidingExpirationMap map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc); + final SlidingExpirationCache + map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); map.computeIfAbsent(2, (key) -> "b", timeoutNanos); @@ -90,7 +93,8 @@ public void testClear() { @Test public void testGetEntries() throws InterruptedException { - final SlidingExpirationMap map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc); + final SlidingExpirationCache + map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); Map expectedEntries = new HashMap<>(); expectedEntries.put(1, "a"); @@ -105,7 +109,8 @@ public void testGetEntries() throws InterruptedException { @Test public void testCleanup() throws InterruptedException { - final SlidingExpirationMap map = new SlidingExpirationMap<>(mockIsItemValidFunc, mockDisposalFunc); + final SlidingExpirationCache + map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.setCleanupIntervalNanos(timeoutNanos * 2); 
map.computeIfAbsent(1, (key) -> "a", timeoutNanos); @@ -113,7 +118,7 @@ public void testCleanup() throws InterruptedException { map.computeIfAbsent(2, (key) -> "b", timeoutNanos); assertTrue(map.getCache().containsKey(1)); - assertTrue(map.getCache().get(1).isExpired()); + assertTrue(map.getCache().get(1).shouldCleanup()); TimeUnit.NANOSECONDS.sleep(timeoutNanos); assertEquals("c", map.computeIfAbsent(2, (key) -> "c", timeoutNanos)); verify(mockIsItemValidFunc, times(2)).isValid(eq("a")); From 1fe56693a917b527b05fc5666c76e9bbbcc3a408 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Tue, 23 May 2023 09:41:30 -0700 Subject: [PATCH 16/21] Fix checkstyle --- .../java/software/amazon/jdbc/util/SlidingExpirationCache.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java index fa5767a9b..66ae0666b 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java +++ b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java @@ -153,7 +153,8 @@ private void cleanUp() { * Set the cleanup interval for the cache. At cleanup time, expired entries marked for cleanup via * {@link ShouldDisposeFunc} (if defined) are disposed. * - * @param cleanupIntervalNanos + * @param cleanupIntervalNanos the time interval defining when we should clean up expired + * entries marked for cleanup, in nanoseconds */ public void setCleanupIntervalNanos(long cleanupIntervalNanos) { this.cleanupIntervalNanos = cleanupIntervalNanos; From 8d921753ab3d87969cff8e25e979627d06b082cd Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Tue, 23 May 2023 10:17:51 -0700 Subject: [PATCH 17/21] cleanup --- .../amazon/jdbc/ConnectionProvider.java | 2 +- .../jdbc/HikariPooledConnectionProvider.java | 2 +- .../jdbc/util/SlidingExpirationCache.java | 4 ++-- .../integration/util/AuroraTestUtility.java | 1 - .../jdbc/util/SlidingExpirationCacheTest.java | 20 +++++++++++-------- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java index 5ee4289b0..19cfbc639 100644 --- a/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java +++ b/wrapper/src/main/java/software/amazon/jdbc/ConnectionProvider.java @@ -65,7 +65,7 @@ boolean acceptsUrl( */ HostSpec getHostSpecByStrategy( @NonNull List hosts, @NonNull HostRole role, @NonNull String strategy) - throws SQLException; + throws SQLException, UnsupportedOperationException; /** * Called once per connection that needs to be created. 
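The hunk above restores the `UnsupportedOperationException` declaration on `getHostSpecByStrategy`. For reference, a caller might consume that contract as in the following minimal sketch; this is not part of the patch series, and the import locations and the `"random"` fallback strategy name are assumptions:

```java
import java.sql.SQLException;
import java.util.List;
import software.amazon.jdbc.ConnectionProvider;
import software.amazon.jdbc.HostRole;
import software.amazon.jdbc.HostSpec;

public class StrategySelectionSketch {
  // Ask the provider for a reader chosen by the "leastConnections" strategy.
  // getHostSpecByStrategy throws the unchecked UnsupportedOperationException for
  // strategies a provider does not implement, so fall back to a simpler strategy.
  public static HostSpec pickReader(final ConnectionProvider provider, final List<HostSpec> hosts)
      throws SQLException {
    try {
      return provider.getHostSpecByStrategy(hosts, HostRole.READER, "leastConnections");
    } catch (UnsupportedOperationException e) {
      return provider.getHostSpecByStrategy(hosts, HostRole.READER, "random");
    }
  }
}
```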
diff --git a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
index 9373728b2..a2fd97e03 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/HikariPooledConnectionProvider.java
@@ -50,7 +50,7 @@ public class HikariPooledConnectionProvider implements PooledConnectionProvider,
   private static final RdsUtils rdsUtils = new RdsUtils();
   private static SlidingExpirationCache<PoolKey, HikariDataSource> databasePools =
       new SlidingExpirationCache<>(
-          (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() > 0,
+          (hikariDataSource) -> hikariDataSource.getHikariPoolMXBean().getActiveConnections() == 0,
           HikariDataSource::close
       );
   private static long poolExpirationCheckNanos = TimeUnit.MINUTES.toNanos(30);

diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
index 66ae0666b..2d5757ced 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
@@ -168,7 +168,7 @@ public void setCleanupIntervalNanos(long cleanupIntervalNanos) {
    * @param <V> the type of object being analyzed for disposal
    */
   public interface ShouldDisposeFunc<V> {
-    boolean isValid(V item);
+    boolean shouldDispose(V item);
   }
@@ -210,7 +210,7 @@ public CacheItem(final V item, final long expirationTimeNano) {
    */
   boolean shouldCleanup() {
     if (shouldDisposeFunc != null) {
-      return System.nanoTime() > expirationTimeNano && !shouldDisposeFunc.isValid(this.item);
+      return System.nanoTime() > expirationTimeNano && shouldDisposeFunc.shouldDispose(this.item);
     }
     return System.nanoTime() > expirationTimeNano;
   }

diff --git a/wrapper/src/test/java/integration/util/AuroraTestUtility.java b/wrapper/src/test/java/integration/util/AuroraTestUtility.java
index 4d1681b01..f04f9c2c5 100644
--- a/wrapper/src/test/java/integration/util/AuroraTestUtility.java
+++ b/wrapper/src/test/java/integration/util/AuroraTestUtility.java
@@ -314,7 +314,6 @@ public TestInstanceInfo createInstance(String instanceId) throws InterruptedExce
             (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(10)));

     if (waiterResponse.matched().exception().isPresent()) {
-      deleteCluster();
       throw new InterruptedException(
           "Instance creation timeout for " + instanceId + ".
The instance was not available within 5 minutes"); diff --git a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java index 944d760ca..8878219c8 100644 --- a/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java +++ b/wrapper/src/test/java/software/amazon/jdbc/util/SlidingExpirationCacheTest.java @@ -18,9 +18,11 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; import java.util.HashMap; import java.util.Map; @@ -33,7 +35,7 @@ public class SlidingExpirationCacheTest { @Mock SlidingExpirationCache.ItemDisposalFunc mockDisposalFunc; - @Mock SlidingExpirationCache.ShouldDisposeFunc mockIsItemValidFunc; + @Mock SlidingExpirationCache.ShouldDisposeFunc mockShouldDisposeFunc; private AutoCloseable closeable; @BeforeEach @@ -49,9 +51,10 @@ void tearDown() throws Exception { @Test public void testComputeIfAbsent() throws InterruptedException { final SlidingExpirationCache - map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); + map = new SlidingExpirationCache<>(mockShouldDisposeFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.setCleanupIntervalNanos(timeoutNanos * 2); + when(mockShouldDisposeFunc.shouldDispose(any())).thenReturn(true); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); assertEquals("a", map.computeIfAbsent(1, (key) -> "b", timeoutNanos)); @@ -61,14 +64,14 @@ public void testComputeIfAbsent() throws InterruptedException { assertEquals("a", map.computeIfAbsent(1, (key) -> "b", timeoutNanos)); TimeUnit.NANOSECONDS.sleep(timeoutNanos); assertEquals("b", map.computeIfAbsent(1, (key) -> "b", timeoutNanos)); - verify(mockIsItemValidFunc, times(1)).isValid(eq("a")); + verify(mockShouldDisposeFunc, times(1)).shouldDispose(eq("a")); verify(mockDisposalFunc, times(1)).dispose(eq("a")); } @Test public void testRemove() { final SlidingExpirationCache - map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); + map = new SlidingExpirationCache<>(mockShouldDisposeFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); @@ -80,7 +83,7 @@ public void testRemove() { @Test public void testClear() { final SlidingExpirationCache - map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); + map = new SlidingExpirationCache<>(mockShouldDisposeFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); map.computeIfAbsent(2, (key) -> "b", timeoutNanos); @@ -94,7 +97,7 @@ public void testClear() { @Test public void testGetEntries() throws InterruptedException { final SlidingExpirationCache - map = new SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); + map = new SlidingExpirationCache<>(mockShouldDisposeFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); Map expectedEntries = new HashMap<>(); expectedEntries.put(1, "a"); @@ -110,9 +113,10 @@ public void testGetEntries() throws InterruptedException { @Test public void testCleanup() throws InterruptedException { final SlidingExpirationCache - map = new 
SlidingExpirationCache<>(mockIsItemValidFunc, mockDisposalFunc); + map = new SlidingExpirationCache<>(mockShouldDisposeFunc, mockDisposalFunc); final long timeoutNanos = TimeUnit.SECONDS.toNanos(1); map.setCleanupIntervalNanos(timeoutNanos * 2); + when(mockShouldDisposeFunc.shouldDispose(any())).thenReturn(true); map.computeIfAbsent(1, (key) -> "a", timeoutNanos); TimeUnit.NANOSECONDS.sleep(timeoutNanos); map.computeIfAbsent(2, (key) -> "b", timeoutNanos); @@ -121,7 +125,7 @@ public void testCleanup() throws InterruptedException { assertTrue(map.getCache().get(1).shouldCleanup()); TimeUnit.NANOSECONDS.sleep(timeoutNanos); assertEquals("c", map.computeIfAbsent(2, (key) -> "c", timeoutNanos)); - verify(mockIsItemValidFunc, times(2)).isValid(eq("a")); + verify(mockShouldDisposeFunc, times(2)).shouldDispose(eq("a")); verify(mockDisposalFunc, times(1)).dispose(eq("a")); verify(mockDisposalFunc, times(1)).dispose(eq("b")); assertEquals("d", map.computeIfAbsent(1, (key) -> "d", timeoutNanos)); From 802e5e0e7171026f8d573a21a953cbc90c70dc83 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Tue, 23 May 2023 10:48:10 -0700 Subject: [PATCH 18/21] Remove keepalive from ReadWriteSplittingTests --- .../refactored/container/tests/ReadWriteSplittingTests.java | 1 - 1 file changed, 1 deletion(-) diff --git a/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java index 1c23e6d15..ea7ff4367 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/ReadWriteSplittingTests.java @@ -597,7 +597,6 @@ protected HikariPoolConfigurator getHikariConfig(int maxPoolSize) { final HikariConfig config = new HikariConfig(); config.setMaximumPoolSize(maxPoolSize); config.setInitializationFailTimeout(75000); - config.setKeepaliveTime(30000); return config; }; } From 290c3bede6faa5c323eddb3550df3fbe599c6542 Mon Sep 17 00:00:00 2001 From: Aaron Congo Date: Tue, 23 May 2023 14:43:16 -0700 Subject: [PATCH 19/21] Increase create/delete instance timeout for autoscaling tests --- wrapper/src/test/java/integration/util/AuroraTestUtility.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/wrapper/src/test/java/integration/util/AuroraTestUtility.java b/wrapper/src/test/java/integration/util/AuroraTestUtility.java index f04f9c2c5..1f1bbd196 100644 --- a/wrapper/src/test/java/integration/util/AuroraTestUtility.java +++ b/wrapper/src/test/java/integration/util/AuroraTestUtility.java @@ -311,7 +311,7 @@ public TestInstanceInfo createInstance(String instanceId) throws InterruptedExce (requestBuilder) -> requestBuilder.filters( Filter.builder().name("db-instance-id").values(instanceId).build()), - (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(10))); + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(15))); if (waiterResponse.matched().exception().isPresent()) { throw new InterruptedException( @@ -360,7 +360,7 @@ public void deleteInstance(TestInstanceInfo instanceToDelete) throws Interrupted (requestBuilder) -> requestBuilder.filters( Filter.builder().name("db-instance-id").values(instanceToDelete.getInstanceId()) .build()), - (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(10))); + (configurationBuilder) -> configurationBuilder.waitTimeout(Duration.ofMinutes(15))); if 
(waiterResponse.matched().exception().isPresent()) {
       throw new InterruptedException(

From d55a1fff8d1da1bf7ef934385039312134626d36 Mon Sep 17 00:00:00 2001
From: Aaron Congo
Date: Wed, 24 May 2023 09:21:28 -0700
Subject: [PATCH 20/21] PR Suggestions

---
 .../java/software/amazon/jdbc/util/SlidingExpirationCache.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
index 2d5757ced..5e5484195 100644
--- a/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
+++ b/wrapper/src/main/java/software/amazon/jdbc/util/SlidingExpirationCache.java
@@ -90,8 +90,8 @@ public V computeIfAbsent(
    * @param key the key associated with the value to be removed/disposed
    */
   public void remove(final K key) {
-    cleanUp();
     removeAndDispose(key);
+    cleanUp();
   }

   private void removeAndDispose(K key) {

From ebe85e14a2468cf980824548b09ac7d180be9cba Mon Sep 17 00:00:00 2001
From: Aaron Congo
Date: Wed, 24 May 2023 13:44:56 -0700
Subject: [PATCH 21/21] PR suggestions

---
 .../UsingTheReadWriteSplittingPlugin.md       | 21 ++++++++++++++++++-
 .../src/main/resources/messages.properties   |  4 ++--
 .../container/ConnectionStringHelper.java    |  1 -
 .../container/tests/AutoscalingTests.java    |  7 +++----
 4 files changed, 25 insertions(+), 8 deletions(-)

diff --git a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md
index 4db7c0980..edd26d8ea 100644
--- a/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md
+++ b/docs/using-the-jdbc-driver/using-plugins/UsingTheReadWriteSplittingPlugin.md
@@ -39,10 +39,29 @@ The wrapper driver currently uses [Hikari](https://github.com/brettwooldridge/Hi
 - username
 - password

+You can optionally pass in a `HikariPoolMapping` function as a second parameter to the `HikariPooledConnectionProvider`. This allows you to decide when new connection pools should be created by defining what is included in the pool map key. A new pool will be created each time a new connection is requested with a unique key. By default, a new pool will be created for each unique instance-user combination. If you would like to define a different key system, you should pass in a `HikariPoolMapping` function defining this logic. A simple example is shown below. Please see [ReadWriteSplittingPostgresExample.java](../../../examples/AWSDriverExample/src/main/java/software/amazon/ReadWriteSplittingPostgresExample.java) for the full example.

 > :warning: If you do not include the username in your HikariPoolMapping function, connection pools may be shared between different users.
+```java +props.setProperty("somePropertyValue", "1"); // used in getPoolKey +final HikariPooledConnectionProvider connProvider = + new HikariPooledConnectionProvider( + ReadWriteSplittingPostgresExample::getHikariConfig, + ReadWriteSplittingPostgresExample::getPoolKey + ); +ConnectionProviderManager.setConnectionProvider(connProvider); + +private static String getPoolKey(HostSpec hostSpec, Properties props) { + // Include the URL, user, and somePropertyValue in the connection pool key so that a new + // connection pool will be opened for each different instance-user-somePropertyValue + // combination. + final String user = props.getProperty(PropertyDefinition.USER.name); + final String somePropertyValue = props.getProperty("somePropertyValue"); + return hostSpec.getUrl() + user + somePropertyValue; +} +``` + 2. Call `ConnectionProviderManager.setConnectionProvider`, passing in the `HikariPooledConnectionProvider` you created in step 1. 3. Continue as normal: create connections and use them as needed. diff --git a/wrapper/src/main/resources/messages.properties b/wrapper/src/main/resources/messages.properties index e576d3feb..69482da14 100644 --- a/wrapper/src/main/resources/messages.properties +++ b/wrapper/src/main/resources/messages.properties @@ -199,8 +199,8 @@ PropertyUtils.failedToSetPropertyWithReason=Failed to set property ''{0}'' on ta # Read Write Splitting Plugin ReadWriteSplittingPlugin.setReadOnlyOnClosedConnection=setReadOnly cannot be called on a closed connection. -ReadWriteSplittingPlugin.errorSwitchingToCachedReader=An error occurred while trying to switch to a cached reader connection to ''{0}''. The driver will attempt to establish a new reader connection. -ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause=An error occurred while trying to switch to a cached reader connection to ''{0}''. Error message: ''{1}''. The driver will attempt to establish a new reader connection. +ReadWriteSplittingPlugin.errorSwitchingToCachedReader=An error occurred while trying to switch to a cached reader connection: ''{0}''. The driver will attempt to establish a new reader connection. +ReadWriteSplittingPlugin.errorSwitchingToCachedReaderWithCause=An error occurred while trying to switch to a cached reader connection: ''{0}''. Error message: ''{1}''. The driver will attempt to establish a new reader connection. ReadWriteSplittingPlugin.errorSwitchingToReader=An error occurred while trying to switch to a reader connection. ReadWriteSplittingPlugin.errorSwitchingToWriter=An error occurred while trying to switch to a writer connection. ReadWriteSplittingPlugin.closingInternalConnections=Closing all internal connections except for the current one. 
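The documentation hunk above shows the `HikariPoolMapping` side of the configuration; the companion `HikariPoolConfigurator` might look like the following minimal sketch. The `(hostSpec, props)` parameter list is inferred from the test helper earlier in this series, and the pool sizing is illustrative only:

```java
import com.zaxxer.hikari.HikariConfig;
import java.util.Properties;
import software.amazon.jdbc.HostSpec;

public class PoolConfiguratorSketch {
  // Only pool-level settings belong here: per the documentation above, the provider
  // itself configures the jdbcUrl, exceptionOverrideClassName, username, and password.
  public static HikariConfig getHikariConfig(final HostSpec hostSpec, final Properties props) {
    final HikariConfig config = new HikariConfig();
    config.setMaximumPoolSize(10);              // illustrative sizing, not from this patch
    config.setInitializationFailTimeout(75000); // mirrors the value used in the tests above
    return config;
  }
}
```

Under those assumptions, it would be passed as the first constructor argument, e.g. `new HikariPooledConnectionProvider(PoolConfiguratorSketch::getHikariConfig, ReadWriteSplittingPostgresExample::getPoolKey)`.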
diff --git a/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java b/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java index 8eac098f8..4dcf52886 100644 --- a/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java +++ b/wrapper/src/test/java/integration/refactored/container/ConnectionStringHelper.java @@ -103,7 +103,6 @@ public static String getWrapperUrl(TestInstanceInfo instance) { TestEnvironment.getCurrent().getInfo().getDatabaseInfo().getDefaultDbName()); } - public static String getWrapperUrl(String host, int port, String databaseName) { return getWrapperUrl(TestEnvironment.getCurrent().getCurrentDriver(), host, port, databaseName); } diff --git a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java index aeeaf87ff..ccde38740 100644 --- a/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java +++ b/wrapper/src/test/java/integration/refactored/container/tests/AutoscalingTests.java @@ -203,10 +203,9 @@ public void test_pooledConnectionAutoScaling_failoverFromDeletedReader() try { for (int i = 1; i < instances.size(); i++) { final String connString = ConnectionStringHelper.getWrapperUrl(instances.get(i)); - final Connection conn1 = DriverManager.getConnection(connString, props); - connections.add(conn1); - final Connection conn2 = DriverManager.getConnection(connString, props); - connections.add(conn2); + // Create 2 connections per instance. + connections.add(DriverManager.getConnection(connString, props)); + connections.add(DriverManager.getConnection(connString, props)); } final Connection newInstanceConn;