diff --git a/bin/control.bat b/bin/control.bat index 15d5e6fcb619b..4894cbc4322ad 100644 --- a/bin/control.bat +++ b/bin/control.bat @@ -156,6 +156,11 @@ if %ERRORLEVEL% equ 0 ( if "%JVM_OPTS%" == "" set JVM_OPTS=-Xms256m -Xmx1g ) +:: +:: Uncomment to enable experimental commands [--wal] +:: +:: set JVM_OPTS=%JVM_OPTS% -DIGNITE_ENABLE_EXPERIMENTAL_COMMAND=true + :: :: Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection. :: diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java new file mode 100644 index 0000000000000..1dc7d474270ed --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jol; + +import java.io.File; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.LinkedList; +import java.util.List; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FileVersionCheckingFactory; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.openjdk.jol.info.GraphLayout; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; + +/** + * + */ +public class FileStoreHeapUtilizationJolBenchmark { + /** */ + private void benchmark() throws IgniteCheckedException { + FilePageStoreFactory factory = new FileVersionCheckingFactory( + new AsyncFileIOFactory(), + new AsyncFileIOFactory(), + new DataStorageConfiguration() + .setPageSize(4096) + ); + + List stores = new LinkedList<>(); + + File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false); + + for (int i = 0; i < 10000; i++) { + final int p = i; + + PageStore ps = factory.createPageStore( + PageMemory.FLAG_DATA, + () -> getPartitionFilePath(workDir, p), + d -> { } + ); + + ps.ensure(); + + ps.write(0, ByteBuffer.allocate(256), 1, false); + + stores.add(ps); + } + + System.gc(); + + GraphLayout layout = GraphLayout.parseInstance(stores); + + System.out.println("heap usage: " + layout.totalSize()); + + U.delete(workDir); + } + + /** */ + private Path getPartitionFilePath(File cacheWorkDir, int partId) { + return new File(cacheWorkDir, String.format(PART_FILE_TEMPLATE, partId)).toPath(); + } + + /** */ + public static 
void main(String[] args) throws Exception { + new FileStoreHeapUtilizationJolBenchmark().benchmark(); + } +} diff --git a/modules/camel/pom.xml b/modules/camel/pom.xml index 0d65ce80aabe5..6cd725f5ba913 100644 --- a/modules/camel/pom.xml +++ b/modules/camel/pom.xml @@ -35,7 +35,7 @@ http://ignite.apache.org - 18.0 + 25.1-jre 2.5.0 diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml index 32b10f5225b7b..8922a53472515 100644 --- a/modules/cassandra/store/pom.xml +++ b/modules/cassandra/store/pom.xml @@ -39,7 +39,7 @@ 3.0.0 3.3 4.1.27.Final - 19.0 + 25.1-jre 3.0.2 diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java index 4fb0cb27d7c8f..4d59e54716115 100644 --- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java +++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java @@ -648,17 +648,25 @@ private void createKeyspace(KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); - log.info("-----------------------------------------------------------------------\n\n" + - settings.getKeyspaceDDLStatement() + "\n"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); + log.info("-----------------------------------------------------------------------\n\n" + + settings.getKeyspaceDDLStatement() + "\n"); + 
log.info("-----------------------------------------------------------------------"); + } + session().execute(settings.getKeyspaceDDLStatement()); - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + return; } catch (Throwable e) { @@ -689,17 +697,25 @@ private void createTable(String table, KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------\n\n" + + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra table '" + tableFullName + "'"); + log.info("-----------------------------------------------------------------------\n\n" + settings.getTableDDLStatement(table) + "\n"); - log.info("-----------------------------------------------------------------------"); + log.info("-----------------------------------------------------------------------"); + } + session().execute(settings.getTableDDLStatement(table)); - log.info("Cassandra table '" + tableFullName + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra table '" + tableFullName + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' already exist"); + 
return; } catch (Throwable e) { @@ -741,14 +757,19 @@ private void createTableIndexes(String table, KeyValuePersistenceSettings settin while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); + log.info("-----------------------------------------------------------------------"); + } for (String statement : indexDDLStatements) { try { - log.info(statement); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info(statement); + log.info("-----------------------------------------------------------------------"); + } + session().execute(statement); } catch (AlreadyExistsException ignored) { @@ -759,7 +780,8 @@ private void createTableIndexes(String table, KeyValuePersistenceSettings settin } } - log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); + if (log.isInfoEnabled()) + log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); return; } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java index 9485d0d54212c..652d635cbe4a1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java @@ -31,6 +31,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import 
org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; /** * Test that checks indexes handling with JDBC. @@ -168,9 +169,9 @@ public void testCreateIndex() throws SQLException { public void testCreateIndexWithDuplicateName() throws SQLException { jdbcRun(CREATE_INDEX); - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(CREATE_INDEX); } }); @@ -219,9 +220,9 @@ public void testDropIndex() throws SQLException { * Test that dropping a non-existent index yields an error. */ public void testDropMissingIndex() { - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(DROP_INDEX); } }); @@ -310,11 +311,11 @@ private IgniteCache cache() { * * @param r Runnable. */ - private static void assertSqlException(RunnableX r) { + private static void assertSqlException(GridTestUtils.RunnableX r) { // We expect IgniteSQLException with given code inside CacheException inside JDBC SQLException. try { - r.run(); + r.runx(); } catch (SQLException e) { return; @@ -325,16 +326,4 @@ private static void assertSqlException(RunnableX r) { fail(SQLException.class.getSimpleName() + " is not thrown."); } - - /** - * Runnable which can throw checked exceptions. - */ - private interface RunnableX { - /** - * Do run. - * - * @throws Exception If failed. 
- */ - public void run() throws Exception; - } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java index 2ba36c369c227..7ac96990a01eb 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java @@ -28,12 +28,12 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.GridTestUtils.RunnableX; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; /** @@ -45,27 +45,18 @@ public class JdbcThinAbstractSelfTest extends GridCommonAbstractTest { * @param r Runnable to check support. */ protected void checkNotSupported(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); - - return null; - } - }, SQLFeatureNotSupportedException.class, null); + GridTestUtils.assertThrowsWithCause(r, SQLFeatureNotSupportedException.class); } /** * @param r Runnable to check on closed connection. 
*/ protected void checkConnectionClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Connection is closed"); } @@ -73,13 +64,11 @@ protected void checkConnectionClosed(final RunnableX r) { * @param r Runnable to check on closed statement. */ protected void checkStatementClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Statement is closed"); } @@ -87,26 +76,14 @@ protected void checkStatementClosed(final RunnableX r) { * @param r Runnable to check on closed result set. */ protected void checkResultSetClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Result set is closed"); } - /** - * Runnable that can throw an exception. - */ - interface RunnableX { - /** - * @throws Exception On error. - */ - void run() throws Exception; - } - /** * @param node Node to connect to. * @param params Connection parameters. 
diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index ad1e3126c88aa..22d7d71ea8f1f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; +import java.sql.SQLException; import java.sql.Statement; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.AffinityKey; @@ -31,6 +32,7 @@ import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -266,6 +268,37 @@ public void testCalculatedValue() throws Exception { assert cnt == 3; } + /** + * @throws Exception If failed. + */ + public void testWrongArgumentType() throws Exception { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = '2'")) { + assertFalse(rs.next()); + } + + // Check non-indexed field. + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + + // Check indexed field. 
+ try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = '2'")) { + assertFalse(rs.next()); + } + + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + } + /** * Person. */ @@ -276,7 +309,7 @@ private static class Person implements Serializable { private final int id; /** Name. */ - @QuerySqlField(index = false) + @QuerySqlField(index = true) private final String name; /** Age. */ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index 6403cac5037f1..bd816e6ef5d2c 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -58,6 +58,7 @@ import static java.sql.ResultSet.TYPE_FORWARD_ONLY; import static java.sql.Statement.NO_GENERATED_KEYS; import static java.sql.Statement.RETURN_GENERATED_KEYS; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; /** * Connection test. 
@@ -570,7 +571,7 @@ public void testCreateStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(); } }); @@ -623,7 +624,7 @@ public void testCreateStatement2() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } @@ -682,7 +683,7 @@ public void testCreateStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } @@ -716,7 +717,7 @@ public void testPrepareStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText); } }); @@ -774,7 +775,7 @@ public void testPrepareStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } }); @@ -839,7 +840,7 @@ public void testPrepareStatement4() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } }); @@ -961,7 +962,7 @@ public void testNativeSql() 
throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.nativeSQL(sqlText); } }); @@ -987,7 +988,7 @@ public void testGetSetAutoCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setAutoCommit(true); } }); @@ -1022,7 +1023,7 @@ public void testCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.commit(); } }); @@ -1057,7 +1058,7 @@ public void testRollback() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(); } }); @@ -1077,7 +1078,7 @@ public void testGetMetaData() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getMetaData(); } }); @@ -1093,14 +1094,14 @@ public void testGetSetReadOnly() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setReadOnly(true); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.isReadOnly(); } }); @@ -1124,14 +1125,14 @@ public void testGetSetCatalog() throws Exception { // Exception when called on closed connection 
checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setCatalog(""); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getCatalog(); } }); @@ -1176,14 +1177,14 @@ public void testGetSetTransactionIsolation() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getTransactionIsolation(); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setTransactionIsolation(TRANSACTION_SERIALIZABLE); } }); @@ -1209,14 +1210,14 @@ public void testClearGetWarnings() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getWarnings(); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.clearWarnings(); } }); @@ -1355,7 +1356,7 @@ public void testSetSavepoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1363,7 +1364,7 @@ public void testSetSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1409,7 +1410,7 @@ public void testSetSavepointName() 
throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1417,7 +1418,7 @@ public void testSetSavepointName() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1463,7 +1464,7 @@ public void testRollbackSavePoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1471,7 +1472,7 @@ public void testRollbackSavePoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1501,7 +1502,7 @@ public void testReleaseSavepoint() throws Exception { final Savepoint savepoint = getFakeSavepoint(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1509,7 +1510,7 @@ public void testReleaseSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1655,7 +1656,7 @@ public void testGetSetClientInfoPair() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getClientInfo(name); } }); @@ -1693,7 +1694,7 @@ public void testGetSetClientInfoProperties() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { conn.getClientInfo(); } }); @@ -1734,7 +1735,7 @@ public void testCreateArrayOf() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1742,7 +1743,7 @@ public void testCreateArrayOf() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1770,7 +1771,7 @@ public void testCreateStruct() throws Exception { final Object[] attrs = new Object[] {100, "Tom"}; checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1778,7 +1779,7 @@ public void testCreateStruct() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1805,13 +1806,13 @@ public void testGetSetSchema() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSchema(schema); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getSchema(); } }); @@ -1889,13 +1890,13 @@ public void testGetSetNetworkTimeout() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getNetworkTimeout(); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void 
runx() throws Exception { conn.setNetworkTimeout(executor, timeout); } }); @@ -1980,4 +1981,4 @@ private Savepoint getFakeSavepoint() { } }; } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java index c5778537096f1..4635702a9dd78 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java @@ -797,146 +797,146 @@ public void testClearParameter() throws Exception { public void testNotSupportedTypes() throws Exception { stmt = conn.prepareStatement(""); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setArray(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() 
throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (Blob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (InputStream)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Clob)null); } }); - 
checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (NClob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setRowId(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { 
stmt.setRef(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setSQLXML(1, null); } }); @@ -1055,4 +1055,4 @@ private TestObject(int id) { this.id = id; } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java index 4f9480261c66b..94713afe873fa 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java @@ -49,6 +49,9 @@ import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; /** * Result set test. 
@@ -772,133 +775,133 @@ public void testNotSupportedTypes() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws 
Exception { rs.getCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getSQLXML(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getSQLXML("id"); } }); @@ -913,499 +916,499 @@ public void testUpdateNotSupported() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBoolean(1, true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { rs.updateBoolean("id", true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte(1, (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte("id", (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort(1, (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort("id", (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat(1, (float)0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat("id", (float)0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble(1, 0.0); } }); checkNotSupported(new RunnableX() { - @Override public 
void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble("id", 0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString(1, ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString("id", ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime(1, new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime("id", new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate(1, new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate("id", new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp(1, new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp("id", new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBytes(1, new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBytes("id", new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
rs.updateArray(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateArray("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() 
throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
rs.updateAsciiStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, 
null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + 
@Override public void runx() throws Exception { rs.updateObject(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.cancelRowUpdates(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.deleteRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.insertRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public 
void runx() throws Exception { rs.moveToInsertRow(); } }); @@ -1423,235 +1426,235 @@ public void testExceptionOnClosedResultSet() throws Exception { rs.close(); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void 
run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { rs.getTime(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.wasNull(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getMetaData(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.next(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.last(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.afterLast(); } }); checkResultSetClosed(new RunnableX() { - 
@Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.beforeFirst(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.first(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.findColumn("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRow(); } }); @@ -1847,4 +1850,4 @@ private TestObjectField(int a, String b) { return S.toString(TestObjectField.class, this); } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java index 82c0512c7ab70..10dad914960cd 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java @@ -132,7 +132,7 @@ public class JdbcThinStatementSelfTest extends JdbcThinAbstractSelfTest { public void testExecuteQuery0() throws Exception { ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -140,22 +140,22 @@ public void testExecuteQuery0() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals("Mike", 
rs.getString("firstName")); + assertEquals("Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -177,8 +177,8 @@ public void testExecuteQuery1() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeQuery(sqlText); } }); @@ -188,15 +188,13 @@ public void testExecuteQuery1() throws Exception { * @throws Exception If failed. */ public void testExecute() throws Exception { - assert stmt.execute(SQL); + assertTrue(stmt.execute(SQL)); - assert stmt.getUpdateCount() == -1 : "Update count must be -1 for SELECT query"; + assertEquals("Update count must be -1 for SELECT query", -1, stmt.getUpdateCount()); ResultSet rs = stmt.getResultSet(); - assert rs != null; - - assert stmt.getResultSet() == null; + assertNotNull(rs); int cnt = 0; @@ -204,22 +202,24 @@ public void testExecute() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); + + assertFalse("Statement has more results.", stmt.getMoreResults()); } /** @@ -228,11 
+228,11 @@ else if (id == 3) { public void testMaxRows() throws Exception { stmt.setMaxRows(1); - assert stmt.getMaxRows() == 1; + assertEquals(1, stmt.getMaxRows()); ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -240,28 +240,28 @@ public void testMaxRows() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 1; + assertEquals(1, cnt); stmt.setMaxRows(0); rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); cnt = 0; @@ -269,22 +269,22 @@ else if (id == 3) { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -295,14 +295,14 @@ public void 
testCloseResultSet0() throws Exception { ResultSet rs1 = stmt.executeQuery(SQL); ResultSet rs2 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; - assert rs1.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs0.isClosed()); + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs1.isClosed()); - assert !rs2.isClosed() : "Last result set must be available"; + assertFalse("Last result set must be available", rs2.isClosed()); stmt.close(); - assert rs2.isClosed() : "ResultSet must be explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs2.isClosed()); } /** @@ -315,7 +315,7 @@ public void testCloseResultSet1() throws Exception { stmt.close(); - assert rs.isClosed() : "ResultSet must be explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs.isClosed()); } /** @@ -326,66 +326,66 @@ public void testCloseResultSetByConnectionClose() throws Exception { conn.close(); - assert stmt.isClosed() : "Statement must be implicitly closed after close connection"; - assert rs.isClosed() : "ResultSet must be implicitly closed after close connection"; + assertTrue("Statement must be implicitly closed after close connection", stmt.isClosed()); + assertTrue("ResultSet must be implicitly closed after close connection", rs.isClosed()); } /** * @throws Exception If failed. 
*/ public void testCloseOnCompletionAfterQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); ResultSet rs2 = stmt.executeQuery(SQL); stmt.closeOnCompletion(); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** * @throws Exception If failed. 
*/ public void testCloseOnCompletionBeforeQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); stmt.closeOnCompletion(); ResultSet rs2 = stmt.executeQuery(SQL); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** @@ -414,7 +414,7 @@ public void testExecuteQueryTimeout() throws Exception { * @throws Exception If failed. 
*/ public void testExecuteQueryMultipleOnlyResultSets() throws Exception { - assert conn.getMetaData().supportsMultipleResultSets(); + assertTrue(conn.getMetaData().supportsMultipleResultSets()); int stmtCnt = 10; @@ -543,8 +543,8 @@ public void testExecuteUpdate() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate(sqlText); } }); @@ -634,15 +634,15 @@ public void testGetSetMaxFieldSizeUnsupported() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxFieldSize(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -684,15 +684,15 @@ public void testGetSetMaxRows() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxRows(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxRows(maxRows); } }); @@ -728,8 +728,8 @@ public void testSetEscapeProcessing() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void 
runx() throws Exception { stmt.setEscapeProcessing(true); } }); @@ -765,15 +765,15 @@ public void testGetSetQueryTimeout() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(timeout); } }); @@ -783,7 +783,7 @@ public void testGetSetQueryTimeout() throws Exception { * @throws Exception If failed. */ public void testMaxFieldSize() throws Exception { - assert stmt.getMaxFieldSize() >= 0; + assertTrue(stmt.getMaxFieldSize() >= 0); GridTestUtils.assertThrows(log, new Callable() { @@ -797,8 +797,8 @@ public void testMaxFieldSize() throws Exception { "Invalid field limit" ); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -808,22 +808,22 @@ public void testMaxFieldSize() throws Exception { * @throws Exception If failed. 
*/ public void testQueryTimeout() throws Exception { - assert stmt.getQueryTimeout() == 0 : "Default timeout invalid: " + stmt.getQueryTimeout(); + assertEquals("Default timeout invalid: " + stmt.getQueryTimeout(), 0, stmt.getQueryTimeout()); stmt.setQueryTimeout(10); - assert stmt.getQueryTimeout() == 10; + assertEquals(10, stmt.getQueryTimeout()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(10); } }); @@ -835,18 +835,18 @@ public void testQueryTimeout() throws Exception { public void testWarningsOnClosedStatement() throws Exception { stmt.clearWarnings(); - assert stmt.getWarnings() == null; + assertNull(null, stmt.getWarnings()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getWarnings(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.clearWarnings(); } }); @@ -856,16 +856,16 @@ public void testWarningsOnClosedStatement() throws Exception { * @throws Exception If failed. 
*/ public void testCursorName() throws Exception { - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); @@ -875,22 +875,22 @@ public void testCursorName() throws Exception { * @throws Exception If failed. */ public void testGetMoreResults() throws Exception { - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); - assert stmt.getResultSet() == null; + assertNull(stmt.getResultSet()); - assert rs.isClosed(); + assertTrue(rs.isClosed()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(); } }); @@ -899,37 +899,59 @@ public void testGetMoreResults() throws Exception { /** * @throws Exception If failed. 
*/ - public void testGetMoreResults1() throws Exception { - assert !stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + public void testGetMoreResultsKeepCurrent() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); - assert !rs.isClosed(); + assertFalse(rs.isClosed()); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + stmt.close(); - assert rs.isClosed(); + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { + stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + } + }); + } + + /** + * @throws Exception If failed. + */ + @org.junit.Test + public void testGetMoreResultsCloseAll() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); + + stmt.execute("select 1; "); + + ResultSet rs = stmt.getResultSet(); + + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); } }); } /** + * Verifies that emty batch can be performed. + * * @throws Exception If failed. 
*/ public void testBatchEmpty() throws Exception { - assert conn.getMetaData().supportsBatchUpdates(); + assertTrue(conn.getMetaData().supportsBatchUpdates()); stmt.addBatch(""); stmt.clearBatch(); @@ -951,7 +973,7 @@ public void testBatchEmpty() throws Exception { * @throws Exception If failed. */ public void testFetchDirection() throws Exception { - assert stmt.getFetchDirection() == ResultSet.FETCH_FORWARD; + assertEquals(ResultSet.FETCH_FORWARD, stmt.getFetchDirection()); GridTestUtils.assertThrows(log, new Callable() { @@ -967,14 +989,14 @@ public void testFetchDirection() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setFetchDirection(-1); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getFetchDirection(); } }); @@ -1006,46 +1028,46 @@ public void testAutogenerated() throws Exception { SQLException.class, "Invalid autoGeneratedKeys value"); - assert !conn.getMetaData().supportsGetGeneratedKeys(); + assertFalse(conn.getMetaData().supportsGetGeneratedKeys()); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getGeneratedKeys(); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { 
stmt.executeUpdate("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", new String[] {"a", "b"}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new String[] {"a", "b"}); } }); @@ -1115,7 +1137,7 @@ public void testStatementTypeMismatchSelectForCachedQuery() throws Exception { SQLException.class, "Given statement type does not match that declared by JDBC driver"); - assert stmt.getResultSet() == null : "Not results expected. Last statement is executed with exception"; + assertNull("Not results expected. Last statement is executed with exception", stmt.getResultSet()); } /** @@ -1137,18 +1159,20 @@ public void testStatementTypeMismatchUpdate() throws Exception { boolean next = rs.next(); - assert next; + assertTrue(next); - assert rs.getInt(1) == 1 : "The data must not be updated. " + + assertEquals("The data must not be updated. " + "Because update statement is executed via 'executeQuery' method." 
+ - " Data [val=" + rs.getInt(1) + ']'; + " Data [val=" + rs.getInt(1) + ']', + 1, + rs.getInt(1)); } /** */ private void fillCache() { IgniteCache cachePerson = grid(0).cache(DEFAULT_CACHE_NAME); - assert cachePerson != null; + assertNotNull(cachePerson); cachePerson.put("p1", new Person(1, "John", "White", 25)); cachePerson.put("p2", new Person(2, "Joe", "Black", 35)); @@ -1229,4 +1253,4 @@ private Person(int id, String firstName, String lastName, int age) { this.age = age; } } -} \ No newline at end of file +} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java deleted file mode 100644 index 946caddb5f203..0000000000000 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/PdsWithTtlCompatibilityTest.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.compatibility; - -import java.util.Collection; -import java.util.concurrent.TimeUnit; -import javax.cache.Cache; -import javax.cache.expiry.AccessedExpiryPolicy; -import javax.cache.expiry.Duration; -import org.apache.ignite.Ignite; -import org.apache.ignite.IgniteCache; -import org.apache.ignite.cache.CacheAtomicityMode; -import org.apache.ignite.cache.CacheWriteSynchronizationMode; -import org.apache.ignite.compatibility.persistence.IgnitePersistenceCompatibilityAbstractTest; -import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.configuration.DataRegionConfiguration; -import org.apache.ignite.configuration.DataStorageConfiguration; -import org.apache.ignite.configuration.IgniteConfiguration; -import org.apache.ignite.configuration.MemoryConfiguration; -import org.apache.ignite.configuration.PersistentStoreConfiguration; -import org.apache.ignite.configuration.WALMode; -import org.apache.ignite.internal.IgniteEx; -import org.apache.ignite.internal.IgniteInterruptedCheckedException; -import org.apache.ignite.internal.processors.cache.GridCacheAbstractFullApiSelfTest; -import org.apache.ignite.internal.processors.cache.persistence.migration.UpgradePendingTreeToPerPartitionTask; -import org.apache.ignite.internal.util.typedef.PA; -import org.apache.ignite.lang.IgniteFuture; -import org.apache.ignite.lang.IgniteInClosure; -import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; -import org.apache.ignite.testframework.GridTestUtils; - -/** - * Test PendingTree upgrading to per-partition basis. Test fill cache with persistence enabled and with ExpirePolicy - * configured on ignite-2.1 version and check if entries will be correctly expired when a new version node started. - * - * Note: Test for ignite-2.3 version will always fails due to entry ttl update fails with assertion on checkpoint lock - * check. 
- */ -public class PdsWithTtlCompatibilityTest extends IgnitePersistenceCompatibilityAbstractTest { - /** */ - static final String TEST_CACHE_NAME = PdsWithTtlCompatibilityTest.class.getSimpleName(); - - /** */ - static final int DURATION_SEC = 10; - - /** */ - private static final int ENTRIES_CNT = 100; - - /** {@inheritDoc} */ - @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { - IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); - - cfg.setPeerClassLoadingEnabled(false); - - cfg.setDataStorageConfiguration( - new DataStorageConfiguration() - .setDefaultDataRegionConfiguration( - new DataRegionConfiguration() - .setMaxSize(32L * 1024 * 1024) - .setPersistenceEnabled(true) - .setCheckpointPageBufferSize(16L * 1024 * 1024) - ).setWalMode(WALMode.LOG_ONLY)); - - return cfg; - } - - /** - * Tests opportunity to read data from previous Ignite DB version. - * - * @throws Exception If failed. - */ - public void testNodeStartByOldVersionPersistenceData_2_1() throws Exception { - doTestStartupWithOldVersion("2.1.0"); - } - - /** - * Tests opportunity to read data from previous Ignite DB version. - * - * @param igniteVer 3-digits version of ignite - * @throws Exception If failed. - */ - protected void doTestStartupWithOldVersion(String igniteVer) throws Exception { - try { - startGrid(1, igniteVer, new ConfigurationClosure(), new PostStartupClosure()); - - stopAllGrids(); - - IgniteEx ignite = startGrid(0); - - assertEquals(1, ignite.context().discovery().topologyVersion()); - - ignite.active(true); - - validateResultingCacheData(ignite, ignite.cache(TEST_CACHE_NAME)); - } - finally { - stopAllGrids(); - } - } - - /** - * @param cache to be filled by different keys and values. Results may be validated in {@link - * #validateResultingCacheData(Ignite, IgniteCache)}. 
- */ - public static void saveCacheData(Cache cache) { - for (int i = 0; i < ENTRIES_CNT; i++) - cache.put(i, "data-" + i); - - //Touch - for (int i = 0; i < ENTRIES_CNT; i++) - assertNotNull(cache.get(i)); - } - - /** - * Asserts cache contained all expected values as it was saved before. - * - * @param cache cache should be filled using {@link #saveCacheData(Cache)}. - */ - public static void validateResultingCacheData(Ignite ignite, - IgniteCache cache) throws IgniteInterruptedCheckedException { - - final long expireTime = System.currentTimeMillis() + TimeUnit.SECONDS.toMillis(DURATION_SEC + 1); - - final IgniteFuture> future = ignite.compute().broadcastAsync(new UpgradePendingTreeToPerPartitionTask()); - - GridTestUtils.waitForCondition(new PA() { - @Override public boolean apply() { - return future.isDone() && expireTime < System.currentTimeMillis(); - } - }, TimeUnit.SECONDS.toMillis(DURATION_SEC + 2)); - - for (Boolean res : future.get()) - assertTrue(res); - - for (int i = 0; i < ENTRIES_CNT; i++) - assertNull(cache.get(i)); - } - - /** */ - public static class ConfigurationClosure implements IgniteInClosure { - /** {@inheritDoc} */ - @Override public void apply(IgniteConfiguration cfg) { - cfg.setLocalHost("127.0.0.1"); - - TcpDiscoverySpi disco = new TcpDiscoverySpi(); - disco.setIpFinder(GridCacheAbstractFullApiSelfTest.LOCAL_IP_FINDER); - - cfg.setDiscoverySpi(disco); - - cfg.setPeerClassLoadingEnabled(false); - - cfg.setMemoryConfiguration(new MemoryConfiguration().setDefaultMemoryPolicySize(256L * 1024 * 1024)); - cfg.setPersistentStoreConfiguration(new PersistentStoreConfiguration().setWalMode(WALMode.LOG_ONLY) - .setCheckpointingPageBufferSize(16L * 1024 * 1024)); - } - } - - /** */ - public static class PostStartupClosure implements IgniteInClosure { - /** {@inheritDoc} */ - @Override public void apply(Ignite ignite) { - ignite.active(true); - - CacheConfiguration cacheCfg = new CacheConfiguration<>(); - cacheCfg.setName(TEST_CACHE_NAME); - 
cacheCfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL); - cacheCfg.setBackups(1); - cacheCfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC); - cacheCfg.setExpiryPolicyFactory(AccessedExpiryPolicy.factoryOf(new Duration(TimeUnit.SECONDS, DURATION_SEC))); - cacheCfg.setEagerTtl(true); - cacheCfg.setGroupName("myGroup"); - - IgniteCache cache = ignite.createCache(cacheCfg); - - saveCacheData(cache); - - ignite.active(false); - } - } -} diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java index fcfd5a7939ca4..eaa38afdd6d61 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testsuites/IgniteCompatibilityBasicTestSuite.java @@ -19,7 +19,6 @@ import junit.framework.TestSuite; import org.apache.ignite.compatibility.persistence.DummyPersistenceCompatibilityTest; -import org.apache.ignite.compatibility.PdsWithTtlCompatibilityTest; import org.apache.ignite.compatibility.persistence.FoldersReuseCompatibilityTest; import org.apache.ignite.compatibility.persistence.IgniteUuidCompatibilityTest; import org.apache.ignite.compatibility.persistence.MigratingToWalV2SerializerWithCompactionTest; @@ -37,8 +36,6 @@ public static TestSuite suite() throws Exception { suite.addTestSuite(DummyPersistenceCompatibilityTest.class); - suite.addTestSuite(PdsWithTtlCompatibilityTest.class); - suite.addTestSuite(FoldersReuseCompatibilityTest.class); suite.addTestSuite(MigratingToWalV2SerializerWithCompactionTest.class); diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index 4f8d06238099f..308baa993c487 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -89,8 +89,12 @@ public final class IgniteSystemProperties { /** * If this system property is set to {@code false} - no checks for new versions will * be performed by Ignite. By default, Ignite periodically checks for the new - * version and prints out the message into the log if new version of Ignite is + * version and prints out the message into the log if a new version of Ignite is * available for download. + * + * Update notifier enabled flag is a cluster-wide value and determined according to the local setting + * during the start of the first node in the cluster. The chosen value will survive the first node shutdown + * and will override the property value on all newly joining nodes. */ public static final String IGNITE_UPDATE_NOTIFIER = "IGNITE_UPDATE_NOTIFIER"; @@ -785,8 +789,10 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_LOG_TX_RECORDS = "IGNITE_WAL_LOG_TX_RECORDS"; - /** If this property is set, {@link DataStorageConfiguration#writeThrottlingEnabled} will be overridden to true - * independent of initial value in configuration. */ + /** + * If this property is set, {@link DataStorageConfiguration#isWriteThrottlingEnabled()} + * will be overridden to {@code true} regardless the initial value in the configuration. + */ public static final String IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED = "IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED"; /** @@ -882,6 +888,24 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP = "IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP"; + /** + * Property for setup percentage of archive size for checkpoint trigger. 
Default value is 0.25 + */ + public static final String IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Property for setup percentage of WAL archive size to calculate threshold since which removing of old archive should be started. + * Default value is 0.5 + */ + public static final String IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Threshold time (in millis) to print warning to log if waiting for next wal segment took longer than the threshold. + * + * Default value is 1000 ms. + */ + public static final String IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = "IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT"; + /** * Count of WAL compressor worker threads. Default value is 4. */ @@ -1121,6 +1145,45 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP = "IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP"; + /** + * Index rebuilding parallelism level. If specified, sets the count of threads that are used for index rebuilding + * and can only be greater than 0, otherwise default value will be used. Maximum count of threads + * can't be greater than total available processors count. + * Default value is minimum of 4 and processors count / 4, but always greater than 0. + */ + public static final String INDEX_REBUILDING_PARALLELISM = "INDEX_REBUILDING_PARALLELISM"; + + /** Enable write rebalnce statistics into log. Default: false */ + public static final String IGNITE_WRITE_REBALANCE_STATISTICS = "IGNITE_WRITE_REBALANCE_STATISTICS"; + + /** Enable write rebalnce statistics by partitions into log. 
Default: false */ + public static final String IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS = + "IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS"; + + /** + * Threshold timeout for long transactions, if transaction exceeds it, it will be dumped in log with + * information about how much time did it spent in system time (time while aquiring locks, preparing, + * commiting, etc) and user time (time when client node runs some code while holding transaction and not + * waiting it). Equals 0 if not set. No long transactions are dumped in log if nor this parameter + * neither {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is set. + */ + public static final String IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD = "IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD"; + + /** + * The coefficient for samples of completed transactions that will be dumped in log. Must be float value + * between 0.0 and 1.0 inclusive. Default value is 0.0. + */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT"; + + /** + * The limit of samples of completed transactions that will be dumped in log per second, if + * {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is above 0.0. Must be integer value + * greater than 0. Default value is 5. + */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT"; + /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java index 81fd50b8ee5fa..e20224746991b 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java @@ -686,13 +686,10 @@ private static QueryEntity convert(QueryEntityTypeDescriptor desc) { * @return Type descriptor. 
*/ private static QueryEntityTypeDescriptor processKeyAndValueClasses( - Class keyCls, - Class valCls + @NotNull Class keyCls, + @NotNull Class valCls ) { - QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(); - - d.keyClass(keyCls); - d.valueClass(valCls); + QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(keyCls, valCls); processAnnotationsInClass(true, d.keyClass(), d, null); processAnnotationsInClass(false, d.valueClass(), d, null); diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java index 549be542fdedf..b0ec17016a60d 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java @@ -30,22 +30,22 @@ /** * API for configuring continuous cache queries. *

- * Continuous queries allow to register a remote filter and a local listener + * Continuous queries allow registering a remote filter and a local listener * for cache updates. If an update event passes the filter, it will be sent to - * the node that executed the query and local listener will be notified. + * the node that executed the query, and local listener will be notified. *

- * Additionally, you can execute initial query to get currently existing data. + * Additionally, you can execute an initial query to get currently existing data. * Query can be of any type (SQL, TEXT or SCAN) and can be set via {@link #setInitialQuery(Query)} * method. *

* Query can be executed either on all nodes in topology using {@link IgniteCache#query(Query)} * method, or only on the local node, if {@link Query#setLocal(boolean)} parameter is set to {@code true}. - * Note that in case query is distributed and a new node joins, it will get the remote - * filter for the query during discovery process before it actually joins topology, + * Note that if the query is distributed and a new node joins, it will get the remote + * filter for the query during discovery process before it actually joins a topology, * so no updates will be missed. *

Example

- * As an example, suppose we have cache with {@code 'Person'} objects and we need - * to query all persons with salary above 1000. + * As an example, suppose we have a cache with {@code 'Person'} objects and we need + * to query for all people with salary above 1000. *

* Here is the {@code Person} class: *

@@ -60,17 +60,17 @@
  * }
  * 
*

- * You can create and execute continuous query like so: + * You can create and execute a continuous query like so: *

- * // Create new continuous query.
+ * // Create a new continuous query.
  * ContinuousQuery<Long, Person> qry = new ContinuousQuery<>();
  *
- * // Initial iteration query will return all persons with salary above 1000.
+ * // Initial iteration query will return all people with salary above 1000.
  * qry.setInitialQuery(new ScanQuery<>((id, p) -> p.getSalary() > 1000));
  *
  *
  * // Callback that is called locally when update notifications are received.
- * // It simply prints out information about all created persons.
+ * // It simply prints out information about all created or modified records.
  * qry.setLocalListener((evts) -> {
  *     for (CacheEntryEvent<? extends Long, ? extends Person> e : evts) {
  *         Person p = e.getValue();
@@ -79,29 +79,29 @@
  *     }
  * });
  *
- * // Continuous listener will be notified for persons with salary above 1000.
+ * // The continuous listener will be notified for people with salary above 1000.
  * qry.setRemoteFilter(evt -> evt.getValue().getSalary() > 1000);
  *
- * // Execute query and get cursor that iterates through initial data.
+ * // Execute the query and get a cursor that iterates through the initial data.
  * QueryCursor<Cache.Entry<Long, Person>> cur = cache.query(qry);
  * 
- * This will execute query on all nodes that have cache you are working with and - * listener will start to receive notifications for cache updates. + * This will execute the query on all nodes that have the cache you are working with and + * the listener will start receiving notifications for cache updates. *

* To stop receiving updates call {@link QueryCursor#close()} method: *

  * cur.close();
  * 
- * Note that this works even if you didn't provide initial query. Cursor will + * Note that this works even if you didn't provide the initial query. The cursor will * be empty in this case, but it will still unregister listeners when {@link QueryCursor#close()} * is called. *

* {@link IgniteAsyncCallback} annotation is supported for {@link CacheEntryEventFilter} * (see {@link #setRemoteFilterFactory(Factory)}) and {@link CacheEntryUpdatedListener} * (see {@link #setLocalListener(CacheEntryUpdatedListener)}). - * If filter and/or listener are annotated with {@link IgniteAsyncCallback} then annotated callback - * is executed in async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) - * and notification order is kept the same as update order for given cache key. + * If a filter and/or listener are annotated with {@link IgniteAsyncCallback} then the annotated callback + * is executed in an async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) + * and a notification order is kept the same as an update order for a given cache key. * * @see ContinuousQueryWithTransformer * @see IgniteAsyncCallback @@ -130,10 +130,10 @@ public ContinuousQuery setInitialQuery(Query> initQry) { } /** - * Sets local callback. This callback is called only in local node when new updates are received. + * Sets a local callback. This callback is called only on local node when new updates are received. *

- * The callback predicate accepts ID of the node from where updates are received and collection - * of received entries. Note that for removed entries value will be {@code null}. + * The callback predicate accepts the ID of the node from which updates are received and a collection + * of the received entries. Note that for removed entries the value will be {@code null}. *

* If the predicate returns {@code false}, query execution will be cancelled. *

@@ -141,7 +141,7 @@ public ContinuousQuery setInitialQuery(Query> initQry) { * synchronization or transactional cache operations), should be executed asynchronously without * blocking the thread that called the callback. Otherwise, you can get deadlocks. *

- * If local listener are annotated with {@link IgniteAsyncCallback} then it is executed in async callback pool + * If local listener are annotated with {@link IgniteAsyncCallback} then it is executed in an async callback pool * (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) that allow to perform a cache operations. * * @param locLsnr Local callback. @@ -157,8 +157,6 @@ public ContinuousQuery setLocalListener(CacheEntryUpdatedListener lo } /** - * Gets local listener. - * * @return Local listener. */ public CacheEntryUpdatedListener getLocalListener() { @@ -214,7 +212,7 @@ public ContinuousQuery setAutoUnsubscribe(boolean autoUnsubscribe) { } /** - * Sets whether this query should be executed on local node only. + * Sets whether this query should be executed on a local node only. * * Note: backup event queues are not kept for local continuous queries. It may lead to loss of notifications in case * of node failures. Use {@link ContinuousQuery#setRemoteFilterFactory(Factory)} to register cache event listeners diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java index 7db42d3b239cf..46c79cb41cb25 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java @@ -49,11 +49,12 @@ public class DefaultCommunicationFailureResolver implements CommunicationFailure if (largestCluster == null) return; - log.info("Communication problem resolver found fully connected independent cluster [" - + "serverNodesCnt=" + largestCluster.srvNodesCnt + ", " - + "clientNodesCnt=" + largestCluster.connectedClients.size() + ", " - + "totalAliveNodes=" + ctx.topologySnapshot().size() + ", " - + "serverNodesIds=" + clusterNodeIds(largestCluster.srvNodesSet, 
ctx.topologySnapshot(), 1000) + "]"); + if (log.isInfoEnabled()) + log.info("Communication problem resolver found fully connected independent cluster [" + + "serverNodesCnt=" + largestCluster.srvNodesCnt + ", " + + "clientNodesCnt=" + largestCluster.connectedClients.size() + ", " + + "totalAliveNodes=" + ctx.topologySnapshot().size() + ", " + + "serverNodesIds=" + clusterNodeIds(largestCluster.srvNodesSet, ctx.topologySnapshot(), 1000) + "]"); keepCluster(ctx, largestCluster); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java index 2cb3dfad5e487..6ce9001138b1b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java @@ -234,7 +234,7 @@ public ComputeTaskSession getTaskSession() { /** {@inheritDoc} */ @Override public boolean cancel() throws IgniteCheckedException { - ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL, null); + ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL); if (onCancelled()) { ctx.task().onCancelled(ses.getId()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java index ac568f065bcc7..7d9f74e5e6273 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java @@ -44,6 +44,8 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; import org.apache.ignite.internal.processors.platform.PlatformEventFilterListener; +import org.apache.ignite.internal.util.future.GridFinishedFuture; 
+import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P2; import org.apache.ignite.internal.util.typedef.T2; @@ -92,6 +94,9 @@ class GridEventConsumeHandler implements GridContinuousHandler { /** Listener. */ private GridLocalEventListener lsnr; + /** P2P unmarshalling future. */ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -142,6 +147,21 @@ public GridEventConsumeHandler() { // No-op. } + /** + * Performs remote filter initialization. + * + * @param filter Remote filter. + * @param ctx Kernal context. + * @throws IgniteCheckedException In case if initialization failed. + */ + private void initFilter(IgnitePredicate filter, GridKernalContext ctx) throws IgniteCheckedException { + if (filter != null) + ctx.resource().injectGeneric(filter); + + if (filter instanceof PlatformEventFilterListener) + ((PlatformEventFilterListener)filter).initialize(ctx); + } + /** {@inheritDoc} */ @Override public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx) throws IgniteCheckedException { @@ -152,12 +172,6 @@ public GridEventConsumeHandler() { if (cb != null) ctx.resource().injectGeneric(cb); - if (filter != null) - ctx.resource().injectGeneric(filter); - - if (filter instanceof PlatformEventFilterListener) - ((PlatformEventFilterListener)filter).initialize(ctx); - final boolean loc = nodeId.equals(ctx.localNodeId()); lsnr = new GridLocalEventListener() { @@ -257,7 +271,18 @@ public GridEventConsumeHandler() { if (F.isEmpty(types)) types = EVTS_ALL; - ctx.event().addLocalEventListener(lsnr, types); + p2pUnmarshalFut.listen((fut) -> { + if (fut.error() == null) { + try { + initFilter(filter, ctx); + } + catch (IgniteCheckedException e) { + throw F.wrap(e); + } + + ctx.event().addLocalEventListener(lsnr, types); + } + }); return 
RegisterStatus.REGISTERED; } @@ -382,13 +407,22 @@ public GridEventConsumeHandler() { assert ctx.config().isPeerClassLoadingEnabled(); if (filterBytes != null) { - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); - filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); + } + catch (IgniteCheckedException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } } } @@ -449,6 +483,7 @@ public GridEventConsumeHandler() { boolean b = in.readBoolean(); if (b) { + p2pUnmarshalFut = new GridFutureAdapter<>(); filterBytes = U.readByteArray(in); clsName = U.readString(in); depInfo = (GridDeploymentInfo)in.readObject(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index c38486a27a72d..53c7230b79277 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -61,7 +61,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import 
org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -405,11 +405,11 @@ public interface GridKernalContext extends Iterable { public GridCollisionManager collision(); /** - * Gets authentication processor. + * Gets instance of {@link IgniteSecurity}. * - * @return Authentication processor. + * @return Ignite security. */ - public GridSecurityProcessor security(); + public IgniteSecurity security(); /** * Gets load balancing manager. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index b05d10913cb9d..486888448206d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -80,7 +80,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -157,7 +157,7 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ 
@GridToStringExclude - private GridSecurityProcessor securityProc; + private IgniteSecurity security; /** */ @GridToStringExclude @@ -567,8 +567,6 @@ else if (comp instanceof GridFailoverManager) failoverMgr = (GridFailoverManager)comp; else if (comp instanceof GridCollisionManager) colMgr = (GridCollisionManager)comp; - else if (comp instanceof GridSecurityProcessor) - securityProc = (GridSecurityProcessor)comp; else if (comp instanceof GridLoadBalancerManager) loadMgr = (GridLoadBalancerManager)comp; else if (comp instanceof GridIndexingManager) @@ -643,6 +641,8 @@ else if (comp instanceof GridInternalSubscriptionProcessor) internalSubscriptionProc = (GridInternalSubscriptionProcessor)comp; else if (comp instanceof IgniteAuthenticationProcessor) authProc = (IgniteAuthenticationProcessor)comp; + else if (comp instanceof IgniteSecurity) + security = (IgniteSecurity)comp; else if (comp instanceof DiagnosticProcessor) diagnosticProcessor = (DiagnosticProcessor)comp; else if (!(comp instanceof DiscoveryNodeValidationProcessor @@ -803,8 +803,8 @@ else if (helper instanceof HadoopHelper) } /** {@inheritDoc} */ - @Override public GridSecurityProcessor security() { - return securityProc; + @Override public IgniteSecurity security() { + return security; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java index c146eca255aba..688ca17fad53b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java @@ -25,12 +25,15 @@ import java.util.Map; import java.util.UUID; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.managers.deployment.GridDeploymentInfoBean; 
import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.continuous.GridContinuousBatch; import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridPeerDeployAware; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.S; @@ -66,6 +69,9 @@ public class GridMessageListenHandler implements GridContinuousHandler { /** */ private boolean depEnabled; + /** P2P unmarshalling future. */ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -84,22 +90,6 @@ public GridMessageListenHandler(@Nullable Object topic, IgniteBiPredicate { + if (fut.error() == null) + ctx.io().addUserMessageListener(topic, pred, nodeId); + }); return RegisterStatus.REGISTERED; } @@ -180,18 +172,27 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { assert ctx != null; assert ctx.config().isPeerClassLoadingEnabled(); - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + ClassLoader ldr = dep.classLoader(); - ClassLoader ldr = dep.classLoader(); + if (topicBytes 
!= null) + topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, ctx.config())); - if (topicBytes != null) - topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, ctx.config())); + pred = U.unmarshal(ctx, predBytes, U.resolveClassLoader(ldr, ctx.config())); + } + catch (IgniteCheckedException | IgniteException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } - pred = U.unmarshal(ctx, predBytes, U.resolveClassLoader(ldr, ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); } /** {@inheritDoc} */ @@ -250,6 +251,7 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { depEnabled = in.readBoolean(); if (depEnabled) { + p2pUnmarshalFut = new GridFutureAdapter<>(); topicBytes = U.readByteArray(in); predBytes = U.readByteArray(in); clsName = U.readString(in); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java index 030e2dbe05956..f19fd4e56142f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java @@ -172,6 +172,9 @@ public IgniteEventsImpl(GridKernalContext ctx, ClusterGroupAdapter prj, boolean autoUnsubscribe, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java index 188a538a998cd..8f7320332ba92 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java @@ -22,6 +22,7 @@ import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; import org.apache.ignite.spi.communication.tcp.messages.HandshakeWaitMessage; +import static 
org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_FEATURES; /** @@ -56,7 +57,16 @@ public enum IgniteFeatures { FIND_AND_DELETE_GARBAGE_COMMAND(8), /** Supports tracking update counter for transactions. */ - TX_TRACKING_UPDATE_COUNTER(12); + TX_TRACKING_UPDATE_COUNTER(12), + + /** Distributed metastorage. */ + IGNITE_SECURITY_PROCESSOR(13), + + /** Replacing TcpDiscoveryNode field with nodeId field in discovery messages. */ + TCP_DISCOVERY_MESSAGE_NODE_COMPACT_REPRESENTATION(14), + + /** LRT system and user time dump settings. */ + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS(18); /** * Unique feature identifier. @@ -139,6 +149,10 @@ public static byte[] allFeatures() { final BitSet set = new BitSet(); for (IgniteFeatures value : IgniteFeatures.values()) { + // After rolling upgrade, our security has more strict validation. This may come as a surprise to customers. + if (IGNITE_SECURITY_PROCESSOR == value && !getBoolean(IGNITE_SECURITY_PROCESSOR.name(), false)) + continue; + final int featureId = value.getFeatureId(); assert !set.get(featureId) : "Duplicate feature ID found for [" + value + "] having same ID [" diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 867d6039ae0f7..dbe228d30f0a4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -161,6 +161,9 @@ import org.apache.ignite.internal.processors.resource.GridSpringResourceContext; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; +import 
org.apache.ignite.internal.processors.security.NoOpIgniteSecurityProcessor; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -171,6 +174,7 @@ import org.apache.ignite.internal.suggestions.JvmConfigurationSuggestions; import org.apache.ignite.internal.suggestions.OsConfigurationSuggestions; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -890,7 +894,8 @@ public void start( @Nullable final Map customExecSvcs, GridAbsClosure errHnd, WorkersRegistry workerRegistry, - Thread.UncaughtExceptionHandler hnd + Thread.UncaughtExceptionHandler hnd, + TimeBag startTimer ) throws IgniteCheckedException { gw.compareAndSet(null, new GridKernalGatewayImpl(cfg.getIgniteInstanceName())); @@ -1079,7 +1084,7 @@ public void start( startProcessor(new GridTimeoutProcessor(ctx)); // Start security processors. - startProcessor(createComponent(GridSecurityProcessor.class, ctx)); + startProcessor(securityProcessor()); // Start SPI managers. // NOTE: that order matters as there are dependencies between managers. 
@@ -1108,7 +1113,13 @@ public void start( startProcessor(createComponent(DiscoveryNodeValidationProcessor.class, ctx)); startProcessor(new GridAffinityProcessor(ctx)); startProcessor(createComponent(GridSegmentationProcessor.class, ctx)); + + startTimer.finishGlobalStage("Start managers"); + startProcessor(createComponent(IgniteCacheObjectProcessor.class, ctx)); + + startTimer.finishGlobalStage("Configure binary metadata"); + startProcessor(createComponent(IGridClusterStateProcessor.class, ctx)); startProcessor(new IgniteAuthenticationProcessor(ctx)); startProcessor(new GridCacheProcessor(ctx)); @@ -1128,11 +1139,15 @@ public void start( startProcessor(createComponent(PlatformProcessor.class, ctx)); startProcessor(new GridMarshallerMappingProcessor(ctx)); + startTimer.finishGlobalStage("Start processors"); + // Start plugins. for (PluginProvider provider : ctx.plugins().allProviders()) { ctx.add(new GridPluginComponent(provider)); provider.start(ctx.plugins().pluginContextForProvider(provider)); + + startTimer.finishGlobalStage("Start '"+ provider.name() + "' plugin"); } // Start platform plugins. @@ -1143,9 +1158,11 @@ public void start( fillNodeAttributes(clusterProc.updateNotifierEnabled()); - ctx.cache().context().database().startMemoryRestore(ctx); + ctx.cache().context().database().startMemoryRestore(ctx, startTimer); ctx.recoveryMode(false); + + startTimer.finishGlobalStage("Finish recovery"); } catch (Throwable e) { U.error( @@ -1169,6 +1186,8 @@ public void start( gw.writeUnlock(); } + startTimer.finishGlobalStage("Join topology"); + // Check whether physical RAM is not exceeded. checkPhysicalRam(); @@ -1206,6 +1225,8 @@ public void start( else active = joinData.active(); + startTimer.finishGlobalStage("Await transition"); + boolean recon = false; // Callbacks. 
@@ -1472,6 +1493,19 @@ private long checkPoolStarvation( if (!isDaemon()) ctx.discovery().ackTopology(ctx.discovery().localJoin().joinTopologyVersion().topologyVersion(), EventType.EVT_NODE_JOINED, localNode()); + + startTimer.finishGlobalStage("Await exchange"); + } + + /** + * @return GridProcessor that implements {@link IgniteSecurity} + */ + private GridProcessor securityProcessor() throws IgniteCheckedException { + GridSecurityProcessor prc = createComponent(GridSecurityProcessor.class, ctx); + + return prc != null && prc.enabled() + ? new IgniteSecurityProcessor(ctx, prc) + : new NoOpIgniteSecurityProcessor(ctx, prc); } /** @@ -1652,6 +1686,7 @@ private void suggestOptimizations(IgniteConfiguration cfg) { private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedException { ctx.addNodeAttribute(ATTR_REBALANCE_POOL_SIZE, configuration().getRebalanceThreadPoolSize()); ctx.addNodeAttribute(ATTR_DATA_STREAMER_POOL_SIZE, configuration().getDataStreamerThreadPoolSize()); + ctx.addNodeAttribute("USE_POOL_FOR_LAZY_ATTR", IgniteSystemProperties.getBoolean("USE_POOL_FOR_LAZY")); final String[] incProps = cfg.getIncludeProperties(); @@ -3566,6 +3601,7 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, String te Ignition.stop(igniteInstanceName, true); } + /** {@inheritDoc} */ @Override public Affinity affinity(String cacheName) { CU.validateCacheName(cacheName); checkClusterState(); @@ -4086,6 +4122,9 @@ private static T createComponent(Class cls, GridKer if (cls.equals(IGridClusterStateProcessor.class)) return (T)new GridClusterStateProcessor(ctx); + if(cls.equals(GridSecurityProcessor.class)) + return null; + Class implCls = null; try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java index 4c23dd5a24397..8d992a870ade1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java 
+++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java @@ -241,6 +241,9 @@ private void send0(@Nullable Object topic, Collection msgs, boolean async) th false, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index 641dde3cf34d9..25675659f8fa2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -17,6 +17,9 @@ package org.apache.ignite.internal; +import javax.management.JMException; +import javax.management.MBeanServer; +import javax.management.ObjectName; import java.io.File; import java.io.IOException; import java.io.InputStream; @@ -46,9 +49,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Handler; -import javax.management.JMException; -import javax.management.MBeanServer; -import javax.management.ObjectName; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -84,6 +84,7 @@ import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.spring.IgniteSpringHelper; import org.apache.ignite.internal.util.typedef.CA; import org.apache.ignite.internal.util.typedef.F; @@ -125,6 +126,7 @@ import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.Nullable; +import static java.util.stream.Collectors.joining; import static org.apache.ignite.IgniteState.STARTED; import static org.apache.ignite.IgniteState.STOPPED; import static 
org.apache.ignite.IgniteState.STOPPED_ON_FAILURE; @@ -1726,7 +1728,17 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException try { starterThread = Thread.currentThread(); - start0(startCtx); + IgniteConfiguration myCfg = initializeConfiguration( + startCtx.config() != null ? startCtx.config() : new IgniteConfiguration() + ); + + TimeBag startNodeTimer = new TimeBag(TimeUnit.MILLISECONDS); + + start0(startCtx, myCfg, startNodeTimer); + + if (log.isInfoEnabled()) + log.info("Node started : " + + startNodeTimer.stagesTimings().stream().collect(joining(",", "[", "]"))); } catch (Exception e) { if (log != null) @@ -1747,27 +1759,24 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException * @throws IgniteCheckedException If start failed. */ @SuppressWarnings({"unchecked", "TooBroadScope"}) - private void start0(GridStartContext startCtx) throws IgniteCheckedException { + private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag startTimer) + throws IgniteCheckedException { assert grid == null : "Grid is already started: " + name; - IgniteConfiguration cfg = startCtx.config() != null ? startCtx.config() : new IgniteConfiguration(); - - IgniteConfiguration myCfg = initializeConfiguration(cfg); - // Set configuration URL, if any, into system property. if (startCtx.configUrl() != null) System.setProperty(IGNITE_CONFIG_URL, startCtx.configUrl().toString()); // Ensure that SPIs support multiple grid instances, if required. 
if (!startCtx.single()) { - ensureMultiInstanceSupport(myCfg.getDeploymentSpi()); - ensureMultiInstanceSupport(myCfg.getCommunicationSpi()); - ensureMultiInstanceSupport(myCfg.getDiscoverySpi()); - ensureMultiInstanceSupport(myCfg.getCheckpointSpi()); - ensureMultiInstanceSupport(myCfg.getEventStorageSpi()); - ensureMultiInstanceSupport(myCfg.getCollisionSpi()); - ensureMultiInstanceSupport(myCfg.getFailoverSpi()); - ensureMultiInstanceSupport(myCfg.getLoadBalancingSpi()); + ensureMultiInstanceSupport(cfg.getDeploymentSpi()); + ensureMultiInstanceSupport(cfg.getCommunicationSpi()); + ensureMultiInstanceSupport(cfg.getDiscoverySpi()); + ensureMultiInstanceSupport(cfg.getCheckpointSpi()); + ensureMultiInstanceSupport(cfg.getEventStorageSpi()); + ensureMultiInstanceSupport(cfg.getCollisionSpi()); + ensureMultiInstanceSupport(cfg.getFailoverSpi()); + ensureMultiInstanceSupport(cfg.getLoadBalancingSpi()); } validateThreadPoolSize(cfg.getPublicThreadPoolSize(), "public"); @@ -1919,14 +1928,14 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { "callback", oomeHnd); - if (myCfg.getConnectorConfiguration() != null) { - validateThreadPoolSize(myCfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); + if (cfg.getConnectorConfiguration() != null) { + validateThreadPoolSize(cfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); restExecSvc = new IgniteThreadPoolExecutor( "rest", - myCfg.getIgniteInstanceName(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getIgniteInstanceName(), + cfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getConnectorConfiguration().getThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue<>(), GridIoPolicy.UNDEFINED, @@ -1936,14 +1945,14 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { restExecSvc.allowCoreThreadTimeOut(true); } - 
validateThreadPoolSize(myCfg.getUtilityCacheThreadPoolSize(), "utility cache"); + validateThreadPoolSize(cfg.getUtilityCacheThreadPoolSize(), "utility cache"); utilityCacheExecSvc = new IgniteThreadPoolExecutor( "utility", cfg.getIgniteInstanceName(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheKeepAliveTime(), + cfg.getUtilityCacheThreadPoolSize(), + cfg.getUtilityCacheThreadPoolSize(), + cfg.getUtilityCacheKeepAliveTime(), new LinkedBlockingQueue<>(), GridIoPolicy.UTILITY_CACHE_POOL, oomeHnd); @@ -2024,7 +2033,7 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { } // Register Ignite MBean for current grid instance. - registerFactoryMbean(myCfg.getMBeanServer()); + registerFactoryMbean(cfg.getMBeanServer()); boolean started = false; @@ -2034,8 +2043,10 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { // Init here to make grid available to lifecycle listeners. grid = grid0; + startTimer.finishGlobalStage("Configure system pool"); + grid0.start( - myCfg, + cfg, utilityCacheExecSvc, execSvc, svcExecSvc, @@ -2058,7 +2069,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { } }, workerRegistry, - oomeHnd + oomeHnd, + startTimer ); state = STARTED; @@ -2171,8 +2183,10 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) // If user provided IGNITE_HOME - set it as a system property. U.setIgniteHome(ggHome); + String userProvidedWorkDir = cfg.getWorkDirectory(); + // Correctly resolve work directory and set it back to configuration. 
- String workDir = U.workDirectory(cfg.getWorkDirectory(), ggHome); + String workDir = U.workDirectory(userProvidedWorkDir, ggHome); myCfg.setWorkDirectory(workDir); @@ -2196,6 +2210,9 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) myCfg.setGridLogger(cfgLog); + if(F.isEmpty(userProvidedWorkDir) && F.isEmpty(U.IGNITE_WORK_DIR)) + log.warning("Ignite work directory is not provided, automatically resolved to: " + workDir); + // Check Ignite home folder (after log is available). if (ggHome != null) { File ggHomeFile = new File(ggHome); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java index 58b58672faa9d..916f5c10d990c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java @@ -99,6 +99,26 @@ public TransactionMetricsMxBeanImpl(TransactionMetrics transactionMetrics) { @Override public long getOwnerTransactionsNumber() { return transactionMetrics.getOwnerTransactionsNumber(); } + + /** {@inheritDoc} */ + @Override public long getTotalNodeSystemTime() { + return transactionMetrics.getTotalNodeSystemTime(); + } + + /** {@inheritDoc} */ + @Override public long getTotalNodeUserTime() { + return transactionMetrics.getTotalNodeUserTime(); + } + + /** {@inheritDoc} */ + @Override public String getNodeSystemTimeHistogram() { + return transactionMetrics.getNodeSystemTimeHistogram(); + } + + /** {@inheritDoc} */ + @Override public String getNodeUserTimeHistogram() { + return transactionMetrics.getNodeUserTimeHistogram(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java index 1969d292cd70b..f4304396eb9b9 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java @@ -141,6 +141,36 @@ else if ("servers".equals(prj)) ctx.cache().setTxOwnerDumpRequestsAllowed(allowed); } + /** {@inheritDoc} */ + @Override public long getLongTransactionTimeDumpThreshold() { + return ctx.cache().context().tm().longTransactionTimeDumpThreshold(); + } + + /** {@inheritDoc} */ + @Override public void setLongTransactionTimeDumpThreshold(long threshold) { + ctx.cache().longTransactionTimeDumpThreshold(threshold); + } + + /** {@inheritDoc} */ + @Override public double getTransactionTimeDumpSamplesCoefficient() { + return ctx.cache().context().tm().transactionTimeDumpSamplesCoefficient(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesCoefficient(double coefficient) { + ctx.cache().transactionTimeDumpSamplesCoefficient(coefficient); + } + + /** {@inheritDoc} */ + @Override public int getTransactionTimeDumpSamplesPerSecondLimit() { + return ctx.cache().context().tm().transactionTimeDumpSamplesPerSecondLimit(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesPerSecondLimit(int limit) { + ctx.cache().longTransactionTimeDumpSamplesPerSecondLimit(limit); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(TransactionsMXBeanImpl.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java index 2c72bb02ed2f5..a3a69e19d9659 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java @@ -30,7 +30,9 @@ import org.jetbrains.annotations.Nullable; /** - * Representation of cluster node that isn't currently present in cluster. 
+ * Representation of cluster node that either isn't currently present in cluster, or semantically detached. + * For example nodes returned from {@code BaselineTopology.currentBaseline()} are always considered as + * semantically detached, even if they are currently present in cluster. */ public class DetachedClusterNode implements ClusterNode { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java index 82779dab6089d..9e76fba697c62 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java @@ -29,7 +29,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentLinkedQueue; @@ -409,6 +412,22 @@ private void validateBeforeBaselineChange(Collection bas if (baselineTop.isEmpty()) throw new IgniteException("BaselineTopology must contain at least one node."); + List currBlT = Optional.ofNullable(ctx.state().clusterState().baselineTopology()). + map(BaselineTopology::currentBaseline).orElse(Collections.emptyList()); + + Collection srvrs = ctx.cluster().get().forServers().nodes(); + + for (BaselineNode node : baselineTop) { + Object consistentId = node.consistentId(); + + if (currBlT.stream().noneMatch( + currBlTNode -> Objects.equals(currBlTNode.consistentId(), consistentId)) && + srvrs.stream().noneMatch( + currServersNode -> Objects.equals(currServersNode.consistentId(), consistentId))) + throw new IgniteException("Check arguments. 
Node with consistent ID [" + consistentId + + "] not found in server nodes."); + } + Collection onlineNodes = onlineBaselineNodesRequestedForRemoval(baselineTop); if (onlineNodes != null) { @@ -470,7 +489,7 @@ private Collection getConsistentIds(Collection n Collection target = new ArrayList<>(top.size()); for (ClusterNode node : top) { - if (!node.isClient()) + if (!node.isClient() && !node.isDaemon()) target.add(node); } @@ -709,7 +728,7 @@ IgniteInternalFuture> startNodesAsync0( Collections.emptyList()); // Exceeding max line width for readability. - GridCompoundFuture> fut = + GridCompoundFuture> fut = new GridCompoundFuture<>(CU.objectsReducer()); AtomicInteger cnt = new AtomicInteger(nodeCallCnt); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index 152fee0b16478..8fcf071974700 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.commandline; import java.io.File; +import java.time.Duration; import java.time.LocalDateTime; import java.util.Arrays; import java.util.Collections; @@ -43,6 +44,7 @@ import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.logger.java.JavaLoggerFileHandler; import org.apache.ignite.logger.java.JavaLoggerFormatter; @@ -205,12 +207,14 @@ public CommandHandler(Logger logger) { * @return Exit code. 
*/ public int execute(List rawArgs) { + LocalDateTime startTime = LocalDateTime.now(); + Thread.currentThread().setName("session=" + ses); logger.info("Control utility [ver. " + ACK_VER_STR + "]"); logger.info(COPYRIGHT); logger.info("User: " + System.getProperty("user.name")); - logger.info("Time: " + LocalDateTime.now()); + logger.info("Time: " + startTime); String commandName = ""; @@ -245,7 +249,8 @@ public int execute(List rawArgs) { try { logger.info("Command [" + commandName + "] started"); - logger.info("Arguments: " + String.join(" ", rawArgs)); + logger.info("Arguments: " + argumentsToString(rawArgs)); + logger.info(DELIM); lastOperationRes = command.execute(clientCfg, logger); } @@ -312,12 +317,71 @@ public int execute(List rawArgs) { return EXIT_CODE_UNEXPECTED_ERROR; } finally { + LocalDateTime endTime = LocalDateTime.now(); + + Duration diff = Duration.between(startTime, endTime); + + logger.info("Control utility has completed execution at: " + endTime); + logger.info("Execution time: " + diff.toMillis() + " ms"); + Arrays.stream(logger.getHandlers()) .filter(handler -> handler instanceof FileHandler) .forEach(Handler::close); } } + /** + * @param rawArgs Arguments which user has provided. + * @return String which could be shown in console and printed to log. + */ + private String argumentsToString(List rawArgs) { + boolean hide = false; + + SB sb = new SB(); + + for (int i = 0; i < rawArgs.size(); i++) { + if (hide) { + sb.a("***** "); + + hide = false; + + continue; + } + + String arg = rawArgs.get(i); + + sb.a(arg).a(' '); + + hide = CommonArgParser.isSensitiveArgument(arg); + } + + return sb.toString(); + } + + /** + * Does one of three things: + *
    + *
  • returns user name from connection parameters if it is there;
  • + *
  • returns user name from client configuration if it is there;
  • + *
  • requests user input and returns entered name.
  • + *
+ * + * @param args Connection parameters. + * @param clientCfg Client configuration. + * @throws IgniteCheckedException If security credentials cannot be provided from client configuration. + */ + private String retrieveUserName( + ConnectionAndSslParameters args, + GridClientConfiguration clientCfg + ) throws IgniteCheckedException { + if (!F.isEmpty(args.userName())) + return args.userName(); + else if (clientCfg.getSecurityCredentialsProvider() == null) + return requestDataFromConsole("user: "); + else + return (String)clientCfg.getSecurityCredentialsProvider().credentials().getLogin(); + } + /** + * @param args Common arguments. + * @return Thin client configuration to connect to cluster. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java index f8d3372e71fdc..0867cd1761675 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java @@ -96,6 +96,9 @@ public class CommonArgParser { /** List of optional auxiliary commands. */ private static final Set AUX_COMMANDS = new HashSet<>(); + /** Set of sensitive arguments. */ + private static final Set SENSITIVE_ARGUMENTS = new HashSet<>(); + static { AUX_COMMANDS.add(CMD_HOST); AUX_COMMANDS.add(CMD_PORT); @@ -119,8 +122,21 @@ public class CommonArgParser { AUX_COMMANDS.add(CMD_TRUSTSTORE); AUX_COMMANDS.add(CMD_TRUSTSTORE_PASSWORD); AUX_COMMANDS.add(CMD_TRUSTSTORE_TYPE); + + SENSITIVE_ARGUMENTS.add(CMD_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_KEYSTORE_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_TRUSTSTORE_PASSWORD); } + /** + * @param arg To check. + * @return True if provided argument is among sensitive ones and should not be displayed. 
+ */ + public static boolean isSensitiveArgument(String arg) { + return SENSITIVE_ARGUMENTS.contains(arg); + } + + /** * @param logger Logger. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java index befe4510d6e42..b73c0fd739087 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java @@ -18,6 +18,8 @@ package org.apache.ignite.internal.commandline; import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.typedef.internal.S; /** * Container with common parsed and validated arguments. @@ -33,6 +35,7 @@ public class ConnectionAndSslParameters { private String user; /** Password. */ + @GridToStringExclude private String pwd; /** Force option is used for auto confirmation. */ @@ -60,6 +63,7 @@ public class ConnectionAndSslParameters { private String sslKeyStoreType; /** Keystore Password. */ + @GridToStringExclude private char[] sslKeyStorePassword; /** Truststore. */ @@ -69,6 +73,7 @@ public class ConnectionAndSslParameters { private String sslTrustStoreType; /** Truststore Password. */ + @GridToStringExclude private char[] sslTrustStorePassword; /** High-level command. */ @@ -259,4 +264,13 @@ public String sslTrustStoreType() { public char[] sslTrustStorePassword() { return sslTrustStorePassword; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(ConnectionAndSslParameters.class, this, + "password", pwd == null ? null : "*****", + "sslKeyStorePassword", sslKeyStorePassword == null ? null: "*****", + "sslTrustStorePassword", sslTrustStorePassword == null? 
null: "*****" + ); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java index e2489cf0c99a1..2ac9c8794d896 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java @@ -39,6 +39,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_ENABLE_EXPERIMENTAL_COMMAND; import static org.apache.ignite.internal.commandline.CommandArgIterator.isCommandOrOption; +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; import static org.apache.ignite.internal.commandline.CommandList.WAL; import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; @@ -69,13 +70,15 @@ public class WalCommands implements Command> { */ private String walArgs; + /** {@inheritDoc} */ @Override public void printUsage(Logger logger) { - if (IgniteSystemProperties.getBoolean(IGNITE_ENABLE_EXPERIMENTAL_COMMAND, false)) { - Command.usage(logger, "Print absolute paths of unused archived wal segments on each node:", WAL, - WAL_PRINT, "[consistentId1,consistentId2,....,consistentIdN]"); - Command.usage(logger, "Delete unused archived wal segments on each node:", WAL, WAL_DELETE, - "[consistentId1,consistentId2,....,consistentIdN]", optional(CMD_AUTO_CONFIRMATION)); - } + if (!enableExperimental()) + return; + + Command.usage(logger, "Print absolute paths of unused archived wal segments on each node:", WAL, + WAL_PRINT, "[consistentId1,consistentId2,....,consistentIdN]"); + Command.usage(logger, "Delete unused archived wal segments on each node:", WAL, WAL_DELETE, + "[consistentId1,consistentId2,....,consistentIdN]", optional(CMD_AUTO_CONFIRMATION)); } /** @@ -85,21 +88,26 @@ public class WalCommands implements Command> 
{ * @throws Exception If failed to execute wal action. */ @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { - this.logger = logger; + if (enableExperimental()) { + this.logger = logger; - try (GridClient client = Command.startClient(clientCfg)) { - switch (walAct) { - case WAL_DELETE: - deleteUnusedWalSegments(client, walArgs, clientCfg); + try (GridClient client = Command.startClient(clientCfg)) { + switch (walAct) { + case WAL_DELETE: + deleteUnusedWalSegments(client, walArgs, clientCfg); - break; + break; - case WAL_PRINT: - default: - printUnusedWalSegments(client, walArgs, clientCfg); + case WAL_PRINT: + default: + printUnusedWalSegments(client, walArgs, clientCfg); - break; + break; + } } + } else { + logger.warning(String.format("For use experimental command add %s=true to JVM_OPTS in %s", + IGNITE_ENABLE_EXPERIMENTAL_COMMAND, UTILITY_NAME)); } return null; @@ -124,8 +132,10 @@ public class WalCommands implements Command> { ? argIter.nextArg("Unexpected argument for " + WAL.text() + ": " + walAct) : ""; - this.walAct = walAct; - this.walArgs = walArgs; + if (enableExperimental()) { + this.walAct = walAct; + this.walArgs = walArgs; + } } else throw new IllegalArgumentException("Unexpected action " + walAct + " for " + WAL.text()); @@ -268,4 +278,11 @@ private void printDeleteWalSegments0(VisorWalTaskResult taskRes) { @Override public String name() { return WAL.toCommandName(); } + + /** + * @return Value of {@link IgniteSystemProperties#IGNITE_ENABLE_EXPERIMENTAL_COMMAND} + */ + private boolean enableExperimental() { + return IgniteSystemProperties.getBoolean(IGNITE_ENABLE_EXPERIMENTAL_COMMAND, false); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java index 395a4ef04ede0..22938856cc061 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java @@ -21,6 +21,8 @@ package org.apache.ignite.internal.commandline.baseline; import java.util.List; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; /** * This class contains all possible arguments after parsing baseline command input. @@ -38,6 +40,7 @@ public class BaselineArguments { /** Requested topology version. */ private long topVer = -1; /** List of consistent ids for operation. */ + @GridToStringInclude List consistentIds; /** @@ -92,6 +95,11 @@ public List getConsistentIds() { return consistentIds; } + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(BaselineArguments.class, this); + } + /** * Builder of {@link BaselineArguments}. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java index a7b6b7d8f059f..0c00fbbbac948 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java @@ -42,7 +42,6 @@ import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.HELP; import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.LIST; import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.VALIDATE_INDEXES; -import static org.apache.ignite.spi.discovery.tcp.ipfinder.sharedfs.TcpDiscoverySharedFsIpFinder.DELIM; /** * High-level "cache" command implementation. @@ -160,7 +159,7 @@ protected static void usageCache( Map paramsDesc, String... 
args ) { - logger.info(INDENT + DELIM); + logger.info(""); logger.info(INDENT + CommandLogger.join(" ", CACHE, cmd, CommandLogger.join(" ", args))); logger.info(DOUBLE_INDENT + description); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java index 458c9a19245ae..10c615d9cfa6f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java @@ -29,6 +29,7 @@ import org.apache.ignite.internal.commandline.CommandArgIterator; import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.processors.cache.verify.ContentionInfo; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.verify.VisorContentionTask; import org.apache.ignite.internal.visor.verify.VisorContentionTaskArg; import org.apache.ignite.internal.visor.verify.VisorContentionTaskResult; @@ -93,6 +94,11 @@ public int minQueueSize() { public int maxPrint() { return maxPrint; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java index 320f19b6f530f..64552d1b3f584 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask; import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskArg; import 
org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskResult; +import org.apache.ignite.internal.util.typedef.internal.S; import static org.apache.ignite.internal.commandline.CommandHandler.NULL; import static org.apache.ignite.internal.commandline.CommandLogger.optional; @@ -102,6 +103,11 @@ public UUID nodeId() { public Set getUserAttributes() { return userAttributes; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java index fdbf74283d3e8..eb8595da25282 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg; import org.apache.ignite.internal.processors.cache.verify.PartitionKey; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.verify.IndexIntegrityCheckIssue; import org.apache.ignite.internal.visor.verify.IndexValidationIssue; @@ -137,6 +138,11 @@ public int checkThrough() { public UUID nodeId() { return nodeId; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java index eb83cf66ce808..55ed3740e3555 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.commandline.argument.CommandArgUtils; import org.apache.ignite.internal.commandline.cache.argument.ListCommandArg; import org.apache.ignite.internal.processors.cache.verify.CacheInfo; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.visor.cache.VisorCacheAffinityConfiguration; @@ -149,6 +150,11 @@ public VisorViewCacheCmd cacheCommand() { * @return Full config flag. 
*/ public boolean fullConfig(){ return fullConfig; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java index 0814dcd5f69cb..5006f075cd009 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.commandline.CommandLogger; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; import org.apache.ignite.internal.commandline.cache.argument.FindAndDeleteGarbageArg; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceJobResult; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTask; import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskArg; @@ -100,6 +101,11 @@ public Set groups() { public boolean delete() { return delete; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java index f0a2058c3d1f9..63afa6d894f7b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java @@ -27,8 +27,8 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.logging.Logger; import java.util.function.Consumer; +import java.util.logging.Logger; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; import org.apache.ignite.IgniteException; @@ -46,6 +46,7 @@ import org.apache.ignite.internal.processors.cache.verify.PartitionKey; import org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTaskV2; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.visor.verify.CacheFilterEnum; import org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTask; @@ -179,6 +180,11 @@ public boolean idleCheckCrc() { public boolean isSkipZeros() { return skipZeros; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } /** Command parsed arguments. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java index 65d60a03f7390..cd6dc77677d91 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.commandline.TaskExecutor; import org.apache.ignite.internal.commandline.argument.CommandArg; import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.visor.diagnostic.Operation; import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksResult; import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksTask; @@ -43,10 +44,10 @@ import static org.apache.ignite.internal.commandline.CommandLogger.optional; import static org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand.PAGE_LOCKS; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.ALL; -import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.NODES; -import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.PATH; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP; import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP_LOG; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.NODES; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.PATH; import static 
org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DEFAULT_TARGET_FOLDER; /** @@ -76,7 +77,7 @@ public class PageLocksCommand implements Command { }); } - VisorPageLocksTrackerArgs taskArg = new VisorPageLocksTrackerArgs(arguments.op, arguments.filePath, nodeIds); + VisorPageLocksTrackerArgs taskArg = new VisorPageLocksTrackerArgs(arguments.operation, arguments.filePath, nodeIds); res = TaskExecutor.executeTask( client, @@ -180,7 +181,7 @@ private void printResult(Map res) { /** */ public static class Arguments { /** */ - private final Operation op; + private final Operation operation; /** */ private final String filePath; /** */ @@ -189,22 +190,27 @@ public static class Arguments { private final Set nodeIds; /** - * @param op Operation. + * @param operation Operation. * @param filePath File path. * @param allNodes If {@code True} include all available nodes for command. If {@code False} include only subset. * @param nodeIds Node ids. */ public Arguments( - Operation op, + Operation operation, String filePath, boolean allNodes, Set nodeIds ) { - this.op = op; + this.operation = operation; this.filePath = filePath; this.allNodes = allNodes; this.nodeIds = nodeIds; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } } enum PageLocksCommandArg implements CommandArg { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java index f4a57bf304ad5..d35bfb86c84f1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java @@ -1,11 +1,12 @@ /* - * Copyright 2019 GridGain Systems, Inc. and Contributors. + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at * - * Licensed under the GridGain Community Edition License (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.gridgain.com/products/software/community-edition/gridgain-community-edition-license + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java index 267addb887169..71db0fc8d06f8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java @@ -59,6 +59,7 @@ import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteComponentType; import org.apache.ignite.internal.IgniteDeploymentCheckedException; +import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.direct.DirectMessageReader; @@ -69,6 +70,8 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter; import org.apache.ignite.internal.processors.pool.PoolProcessor; +import 
org.apache.ignite.internal.processors.security.OperationSecurityContext; +import org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashSet; import org.apache.ignite.internal.util.StripedCompositeReadWriteLock; @@ -78,6 +81,7 @@ import org.apache.ignite.internal.util.lang.IgnitePair; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.S; @@ -96,6 +100,7 @@ import org.apache.ignite.spi.communication.CommunicationListener; import org.apache.ignite.spi.communication.CommunicationSpi; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; @@ -203,11 +208,7 @@ public class GridIoManager extends GridManagerAdapter {}; /** * @param ctx Grid kernal context. 
@@ -1048,7 +1049,7 @@ private void processP2PMessage( assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } finally { threadProcessingMessage(false, null); @@ -1090,7 +1091,11 @@ private void processRegularMessage( processRegularMessage0(msg, nodeId); } - finally { + catch (Throwable e) { + log.error("An error occurred processing the message [msg=" + msg + ", nodeId=" + nodeId + "].", e); + + throw e; + } finally { threadProcessingMessage(false, null); msgC.run(); @@ -1181,7 +1186,7 @@ private void processRegularMessage0(GridIoMessage msg, UUID nodeId) { assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } /** @@ -1543,8 +1548,9 @@ private void unwindMessageSet(GridCommunicationMessageSet msgSet, GridMessageLis * @param lsnr Listener. * @param nodeId Node ID. * @param msg Message. + * @param secCtxMsg Security subject that will be used to open a security session. */ - private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg) { + private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg, @Nullable T2 secCtxMsg) { Byte oldPlc = CUR_PLC.get(); boolean change = !F.eq(oldPlc, plc); @@ -1552,7 +1558,10 @@ private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Obj if (change) CUR_PLC.set(plc); - try { + SecurityContext secCtx = secCtxMsg != null ? secCtxMsg.get2() : null; + UUID newSecSubjId = secCtxMsg != null && secCtxMsg.get1() != null ? secCtxMsg.get1() : nodeId; + + try (OperationSecurityContext s = secCtx != null ? ctx.security().withContext(secCtx) : ctx.security().withContext(newSecSubjId)) { lsnr.onMessage(nodeId, msg, plc); } finally { @@ -1614,7 +1623,7 @@ private void send( assert !async || msg instanceof GridIoUserMessage : msg; // Async execution was added only for IgniteMessaging. 
assert topicOrd >= 0 || !(topic instanceof GridTopic) : msg; - GridIoMessage ioMsg = new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, msg, plc, ordered, timeout, skipOnTimeout); if (locNodeId.equals(node.id())) { assert plc != P2P_POOL; @@ -1656,6 +1665,38 @@ else if (async) } } + /** + * @return One of two message wrappers. The first is {@link GridIoMessage}, the second is secured version {@link + * GridIoSecurityAwareMessage}. + */ + private @NotNull GridIoMessage createGridIoMessage( + Object topic, + int topicOrd, + Message msg, + byte plc, + boolean ordered, + long timeout, + boolean skipOnTimeout) throws IgniteCheckedException { + boolean securityMsgSupported = IgniteFeatures.allNodesSupports(ctx.discovery().allNodes(), IgniteFeatures.IGNITE_SECURITY_PROCESSOR); + + if (ctx.security().enabled() && securityMsgSupported) { + UUID secSubjId = null; + + SecurityContext secCtx = ctx.security().securityContext(); + UUID curSecSubjId = secCtx.subject().id(); + + if (!locNodeId.equals(curSecSubjId)) + secSubjId = curSecSubjId; + + //Network optimization + byte[] secSubject = secSubjId != null && ctx.discovery().node(secSubjId) == null ? U.marshal(marsh, secCtx) : null; + + return new GridIoSecurityAwareMessage(secSubjId, secSubject, plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + + return new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + /** * @param nodeId Id of destination node. * @param topic Topic to send the message to. @@ -1964,11 +2005,24 @@ else if (loc) { } /** + * Subscribe at messages from a topic. + * * @param topic Topic to subscribe to. * @param p Message predicate. 
*/ - @SuppressWarnings("unchecked") - public void addUserMessageListener(@Nullable final Object topic, @Nullable final IgniteBiPredicate p) { + public void addUserMessageListener(final @Nullable Object topic, final @Nullable IgniteBiPredicate p) { + addUserMessageListener(topic, p, null); + } + + /** + * @param topic Topic to subscribe to. + * @param p Message predicate. + */ + public void addUserMessageListener( + final @Nullable Object topic, + final @Nullable IgniteBiPredicate p, + final @Nullable UUID nodeId + ) { if (p != null) { try { if (p instanceof PlatformMessageFilter) @@ -1977,7 +2031,7 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final ctx.resource().injectGeneric(p); addMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); + new GridUserMessageListener(topic, (IgniteBiPredicate)p, nodeId)); } catch (IgniteCheckedException e) { throw new IgniteException(e); @@ -1991,13 +2045,8 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final */ @SuppressWarnings("unchecked") public void removeUserMessageListener(@Nullable Object topic, IgniteBiPredicate p) { - try { - removeMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } + removeMessageListener(TOPIC_COMM_USER, + new GridUserMessageListener(topic, (IgniteBiPredicate)p)); } /** @@ -2416,15 +2465,27 @@ private class GridUserMessageListener implements GridMessageListener { /** User message topic. */ private final Object topic; + /** Initial node id. */ + private final UUID initNodeId; + /** * @param topic User topic. * @param predLsnr Predicate listener. - * @throws IgniteCheckedException If failed to inject resources to predicates. + * @param initNodeId Node id that registered given listener. 
*/ - GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) - throws IgniteCheckedException { + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr, + @Nullable UUID initNodeId) { this.topic = topic; this.predLsnr = predLsnr; + this.initNodeId = initNodeId; + } + + /** + * @param topic User topic. + * @param predLsnr Predicate listener. + */ + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) { + this(topic, predLsnr, null); } /** {@inheritDoc} */ @@ -2521,8 +2582,10 @@ private class GridUserMessageListener implements GridMessageListener { if (msgBody != null) { if (predLsnr != null) { - if (!predLsnr.apply(nodeId, msgBody)) - removeMessageListener(TOPIC_COMM_USER, this); + try(OperationSecurityContext s = ctx.security().withContext(initNodeId)) { + if (!predLsnr.apply(nodeId, msgBody)) + removeMessageListener(TOPIC_COMM_USER, this); + } } } } @@ -2749,7 +2812,7 @@ void unwind(GridMessageListener lsnr) { for (GridTuple3 t = msgs.poll(); t != null; t = msgs.poll()) { try { - invokeListener(plc, lsnr, nodeId, t.get1().message()); + invokeListener(plc, lsnr, nodeId, t.get1().message(), secSubj(t.get1())); } finally { if (t.get3() != null) @@ -3145,4 +3208,31 @@ public long binLatencyMcs() { return latencyLimit / (1000 * (resLatency.length - 1)); } } + + /** + * @param msg Communication message. + * @return A pair that represents a security subject id and security context. The returned value can be {@code null} + * in case of security context is not enabled. + */ + private T2 secSubj(GridIoMessage msg) { + if (ctx.security().enabled() && msg instanceof GridIoSecurityAwareMessage) { + GridIoSecurityAwareMessage secMsg = (GridIoSecurityAwareMessage)msg; + + SecurityContext secCtx = null; + + try { + secCtx = secMsg.getSecCtx() != null ? 
U.unmarshal(marsh, secMsg.getSecCtx(), U.resolveClassLoader(ctx.config())) : null; + } + catch (IgniteCheckedException e) { + log.error("Security context unmarshaled with error.", e); + } + + return new T2<>( + secMsg.secSubjId(), + secCtx + ); + } + + return null; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java index 0b296acf194f2..3c3f2a0f59c8d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java @@ -958,6 +958,11 @@ public GridIoMessageFactory(MessageFactory[] ext) { break; + case GridIoSecurityAwareMessage.TYPE_CODE: + msg = new GridIoSecurityAwareMessage(); + + break; + // [-3..119] [124..129] [-23..-28] [-36..-55] - this // [120..123] - DR // [-4..-22, -30..-35] - SQL diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java new file mode 100644 index 0000000000000..825644ddb0bf7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.communication; + +import java.io.Externalizable; +import java.nio.ByteBuffer; +import java.util.UUID; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +/** + * Represents a security communication message. + */ +public class GridIoSecurityAwareMessage extends GridIoMessage { + /** */ + private static final long serialVersionUID = 0L; + /** */ + public static final short TYPE_CODE = 174; + + /** Security subject id that will be used during message processing on a remote node. */ + private UUID secSubjId; + + /** Security context transmitted from the node that initiated the action. */ + private byte[] secCtx; + + /** + * No-op constructor to support {@link Externalizable} interface. + * This constructor is not meant to be used for other purposes. + */ + public GridIoSecurityAwareMessage() { + // No-op. + } + + /** + * @param secSubjId Security subject id. + * @param plc Policy. + * @param topic Communication topic. + * @param topicOrd Topic ordinal value. + * @param msg Message. + * @param ordered Message ordered flag. + * @param timeout Timeout. + * @param skipOnTimeout Whether message can be skipped on timeout. 
+ */ + public GridIoSecurityAwareMessage( + UUID secSubjId, + byte[] secSubject, + byte plc, + Object topic, + int topicOrd, + Message msg, + boolean ordered, + long timeout, + boolean skipOnTimeout) { + super(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + + this.secSubjId = secSubjId; + this.secCtx = secSubject; + } + + /** + * @return Security subject id. + */ + UUID secSubjId() { + return secSubjId; + } + + /** + * @return Security context + */ + public byte[] getSecCtx() { + return secCtx; + } + + /** {@inheritDoc} */ + @Override public short directType() { + return TYPE_CODE; + } + + /** {@inheritDoc} */ + @Override public byte fieldsCount() { + return 9; + } + + /** {@inheritDoc} */ + @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { + writer.setBuffer(buf); + + if (!super.writeTo(buf, writer)) + return false; + + if (!writer.isHeaderWritten()) { + if (!writer.writeHeader(directType(), fieldsCount())) + return false; + + writer.onHeaderWritten(); + } + + switch (writer.state()) { + case 7: + if (!writer.writeByteArray("secCtx", secCtx)) + return false; + + writer.incrementState(); + + case 8: + if (!writer.writeUuid("secSubjId", secSubjId)) + return false; + + writer.incrementState(); + + } + + return true; + } + + /** {@inheritDoc} */ + @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { + reader.setBuffer(buf); + + if (!reader.beforeMessageRead()) + return false; + + if (!super.readFrom(buf, reader)) + return false; + + switch (reader.state()) { + case 7: + secCtx = reader.readByteArray("secCtx"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 8: + secSubjId = reader.readUuid("secSubjId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + } + + return reader.afterMessageRead(GridIoSecurityAwareMessage.class); + } +} \ No newline at end of file diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java index 3450aa5195c4a..053cc6cad5566 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java @@ -30,8 +30,10 @@ import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicStampedReference; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeTask; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.internal.processors.task.GridInternal; @@ -42,6 +44,7 @@ import org.apache.ignite.internal.util.lang.GridTuple; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; @@ -457,7 +460,7 @@ public Class existingDeployedClass(String clsName) { if (cls == null) { try { - cls = Class.forName(clsName, true, clsLdr); + cls = U.forName(clsName, clsLdr); Class cur = clss.putIfAbsent(clsName, cls); @@ -478,7 +481,7 @@ public Class existingDeployedClass(String clsName) { return cls; else if (!a.equals(clsName)) { try { - cls = Class.forName(a, true, clsLdr); + cls = U.forName(a, clsLdr); } catch (ClassNotFoundException ignored0) { continue; @@ -501,6 +504,10 @@ else if (!a.equals(clsName)) { } } } + catch (IgniteException e) { + if (!X.hasCause(e, TimeoutException.class)) + throw e; + } } return cls; diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java index ca9ce328b6b5c..531d6c254f0d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java @@ -28,7 +28,9 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.DeploymentMode; @@ -37,6 +39,7 @@ import org.apache.ignite.internal.util.GridByteArrayList; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; @@ -445,6 +448,9 @@ private boolean isLocallyExcluded(String name) { // Catch Throwable to secure against any errors resulted from // corrupted class definitions or other user errors. catch (Exception e) { + if (X.hasCause(e, TimeoutException.class)) + throw e; + throw new ClassNotFoundException("Failed to load class due to unexpected error: " + name, e); } @@ -581,6 +587,8 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas IgniteCheckedException err = null; + TimeoutException te = null; + for (UUID nodeId : nodeListCp) { if (nodeId.equals(ctx.discovery().localNode().id())) // Skip local node as it is already used as parent class loader. 
@@ -598,7 +606,14 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas } try { - GridDeploymentResponse res = comm.sendResourceRequest(path, ldrId, node, endTime); + GridDeploymentResponse res = null; + + try { + res = comm.sendResourceRequest(path, ldrId, node, endTime); + } + catch (TimeoutException e) { + te = e; + } if (res == null) { String msg = "Failed to send class-loading request to node (is node alive?) [node=" + @@ -657,12 +672,28 @@ else if (log.isDebugEnabled()) } } + if (te != null) { + err.addSuppressed(te); + + throw new IgniteException(err); + } + throw new ClassNotFoundException("Failed to peer load class [class=" + name + ", nodeClsLdrs=" + nodeLdrMapCp + ", parentClsLoader=" + getParent() + ']', err); } /** {@inheritDoc} */ @Nullable @Override public InputStream getResourceAsStream(String name) { + try { + return getResourceAsStreamEx(name); + } + catch (TimeoutException ignore) { + return null; + } + } + + /** */ + @Nullable public InputStream getResourceAsStreamEx(String name) throws TimeoutException { assert !Thread.holdsLock(mux); if (byteMap != null && name.endsWith(".class")) { @@ -702,7 +733,7 @@ else if (log.isDebugEnabled()) * @param name Resource name. * @return InputStream for resource or {@code null} if resource could not be found. 
*/ - @Nullable private InputStream sendResourceRequest(String name) { + @Nullable private InputStream sendResourceRequest(String name) throws TimeoutException { assert !Thread.holdsLock(mux); long endTime = computeEndTime(p2pTimeout); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java index e14c8dfafcb10..973c51ecfe02f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.UUID; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; @@ -355,7 +356,7 @@ void sendUndeployRequest(String rsrcName, Collection rmtNodes) thro */ @SuppressWarnings({"SynchronizationOnLocalVariableOrMethodParameter"}) GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid clsLdrId, - final ClusterNode dstNode, long threshold) throws IgniteCheckedException { + final ClusterNode dstNode, long threshold) throws IgniteCheckedException, TimeoutException { assert rsrcName != null; assert dstNode != null; assert clsLdrId != null; @@ -472,13 +473,21 @@ GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid cls timeout = threshold - U.currentTimeMillis(); } + + if (timeout <= 0) + throw new TimeoutException(); } catch (InterruptedException e) { // Interrupt again to get it in the users code. 
Thread.currentThread().interrupt(); - throw new IgniteCheckedException("Got interrupted while waiting for response from node: " + - dstNode.id(), e); + TimeoutException te = new TimeoutException( + "Got interrupted while waiting for response from node: " + dstNode.id() + ); + + te.initCause(e); + + throw te; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java index b27cc4bd0275f..1d36571e7b1bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java @@ -188,7 +188,7 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { // Check that class can be loaded. String clsName = meta.className(); - Class cls = Class.forName(clsName != null ? clsName : alias, true, ldr); + Class cls = U.forName(clsName != null ? clsName : alias, ldr); spi.register(ldr, cls); @@ -227,6 +227,11 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return deployment(meta.alias()); + } + /** * @param alias Class alias. * @return Deployment. @@ -446,7 +451,7 @@ private void recordDeployFailed(Class cls, ClassLoader clsLdr, boolean record evt.message(msg); evt.node(ctx.discovery().localNode()); - evt.type(isTask(cls) ? EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); + evt.type(isTask ? 
EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); evt.alias(taskName); ctx.event().record(evt); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java index 01d8604ceaffd..04cfd60610b42 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java @@ -458,6 +458,11 @@ else if (locDep != null) { } } + GridDeployment dep = verStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + if (reuse) { GridDeployment locDep = locStore.getDeployment(meta); @@ -496,7 +501,12 @@ else if (locDep != null) { // Private or Isolated mode. meta.record(false); - GridDeployment dep = locStore.getDeployment(meta); + GridDeployment dep = ldrStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + + dep = locStore.getDeployment(meta); if (sndNodeId.equals(ctx.localNodeId())) { if (dep == null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java index 4ba308c9ef6ef..0477523949fce 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java @@ -219,7 +219,7 @@ public class GridDeploymentPerLoaderStore extends GridDeploymentStoreAdapter { IsolatedDeployment dep; synchronized (mux) { - dep = cache.get(meta.classLoaderId()); + dep = (IsolatedDeployment)searchDeploymentCache(meta); if (dep == null) { long undeployTimeout = 0; @@ -331,6 +331,11 @@ else if (d.sequenceNumber() > meta.sequenceNumber()) { 
return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return cache.get(meta.classLoaderId()); + } + /** {@inheritDoc} */ @Override public void addParticipants(Map allParticipants, Map addedParticipants) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 56a3f3e026fb0..75160f089bd66 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -29,6 +29,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.events.DeploymentEvent; @@ -277,6 +278,26 @@ else if (log.isDebugEnabled()) } } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + List deps = null; + + synchronized (mux) { + deps = cache.get(meta.userVersion()); + } + + if (deps != null) { + assert !deps.isEmpty(); + + for (SharedDeployment d : deps) { + if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) + return d; + } + } + + return null; + } + /** {@inheritDoc} */ @Override @Nullable public GridDeployment getDeployment(GridDeploymentMetadata meta) { assert meta != null; @@ -356,22 +377,14 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { return null; } - List deps = cache.get(meta.userVersion()); + dep = (SharedDeployment)searchDeploymentCache(meta); - if (deps != null) { - assert !deps.isEmpty(); + if (dep == null) { + List deps = 
cache.get(meta.userVersion()); - for (SharedDeployment d : deps) { - if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId()) || - meta.senderNodeId().equals(ctx.localNodeId())) { - // Done. - dep = d; + if (deps != null) { + assert !deps.isEmpty(); - break; - } - } - - if (dep == null) { checkRedeploy(meta); // Find existing deployments that need to be checked @@ -413,12 +426,12 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { deps.add(dep); } } - } - else { - checkRedeploy(meta); + else { + checkRedeploy(meta); - // Create peer class loader. - dep = createNewDeployment(meta, true); + // Create peer class loader. + dep = createNewDeployment(meta, true); + } } } @@ -689,7 +702,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; // Temporary class loader. - ClassLoader temp = new GridDeploymentClassLoader( + GridDeploymentClassLoader temp = new GridDeploymentClassLoader( IgniteUuid.fromUuid(ctx.localNodeId()), meta.userVersion(), meta.deploymentMode(), @@ -712,7 +725,14 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta InputStream rsrcIn = null; try { - rsrcIn = temp.getResourceAsStream(path); + boolean timeout = false; + + try { + rsrcIn = temp.getResourceAsStreamEx(path); + } + catch (TimeoutException e) { + timeout = true; + } boolean found = rsrcIn != null; @@ -732,7 +752,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; } - else + else if (!timeout) // Cache result if classloader is still alive. 
ldrRsrcCache.put(clsName, found); } @@ -1190,8 +1210,6 @@ boolean hasParticipant(UUID nodeId, IgniteUuid ldrId) { assert nodeId != null; assert ldrId != null; - assert Thread.holdsLock(mux); - return classLoader().hasRegisteredNode(nodeId, ldrId); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java index 07e1e22750a5f..d529eaf47a1b9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java @@ -58,6 +58,12 @@ public interface GridDeploymentStore { */ @Nullable public GridDeployment getDeployment(GridDeploymentMetadata meta); + /** + * @param meta Deployment metadata. + * @return Grid deployment instance if it was found in the cache, {@code null} otherwise. + */ + @Nullable public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta); + /** * Gets class loader based on ID. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java new file mode 100644 index 0000000000000..5a440cefa1cca --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import org.jetbrains.annotations.NotNull; + +/** + * Exception which can be used to access a message which failed to be deserialized completely using Java serialization. + * Thrown from deserialization methods, it can be caught by a caller. + *

+ * Should be {@link RuntimeException} because of limitations of Java serialization mechanisms. + *

+ * Catching {@link ClassNotFoundException} inside deserialization methods cannot do the same trick because + * Java deserialization remembers such exception internally and will rethrow it anyway upon returning to a user. + */ +public class IncompleteDeserializationException extends RuntimeException { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final DiscoveryCustomMessage m; + + /** + * @param m Message. + */ + public IncompleteDeserializationException(@NotNull DiscoveryCustomMessage m) { + super(null, null, false, false); + + this.m = m; + } + + /** + * @return Message. + */ + @NotNull public DiscoveryCustomMessage message() { + return m; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java index 793b4fc20a97e..c4ec2630c0537 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java @@ -72,9 +72,12 @@ import static org.apache.ignite.events.EventType.EVTS_ALL; import static org.apache.ignite.events.EventType.EVTS_DISCOVERY_ALL; +import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED; +import static org.apache.ignite.events.EventType.EVT_TASK_FAILED; +import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED; import static org.apache.ignite.internal.GridTopic.TOPIC_EVENT; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL; @@ -375,7 
+378,7 @@ public int[] enabledEvents() { public synchronized void enableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_ENABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_ENABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -418,7 +421,7 @@ public synchronized void enableEvents(int[] types) { public synchronized void disableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_DISABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_DISABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -507,7 +510,16 @@ private boolean isHiddenEvent(int type) { * @return {@code true} if this is an internal event. */ private boolean isInternalEvent(int type) { - return type == EVT_DISCOVERY_CUSTOM_EVT || F.contains(EVTS_DISCOVERY_ALL, type); + switch (type) { + case EVT_DISCOVERY_CUSTOM_EVT: + case EVT_TASK_FINISHED: + case EVT_TASK_FAILED: + case EVT_JOB_MAPPED: + return true; + + default: + return F.contains(EVTS_DISCOVERY_ALL, type); + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java index 1aa065e10df40..8957d9b6ec27b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java @@ -19,8 +19,6 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; -import java.util.Arrays; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.GridUnsafe; @@ -92,10 +90,6 @@ public FullPageId 
fullPageId() { + "],\nsuper = [" + super.toString() + "]]"; } - catch (IgniteCheckedException ignored) { - return "Error during call'toString' of PageSnapshot [fullPageId=" + fullPageId() + - ", pageData = " + Arrays.toString(pageData) + ", super=" + super.toString() + "]"; - } finally { GridUnsafe.cleanDirectBuffer(buf); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java index 0031b22efa478..87ca9d689f9c4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java @@ -32,126 +32,126 @@ public abstract class WALRecord { */ public enum RecordType { /** */ - TX_RECORD, + TX_RECORD (0), /** */ - PAGE_RECORD, + PAGE_RECORD (1), /** */ - DATA_RECORD, + DATA_RECORD (2), /** Checkpoint (begin) record */ - CHECKPOINT_RECORD, + CHECKPOINT_RECORD (3), /** WAL segment header record. */ - HEADER_RECORD, + HEADER_RECORD (4), // Delta records. 
/** */ - INIT_NEW_PAGE_RECORD, + INIT_NEW_PAGE_RECORD (5), /** */ - DATA_PAGE_INSERT_RECORD, + DATA_PAGE_INSERT_RECORD (6), /** */ - DATA_PAGE_INSERT_FRAGMENT_RECORD, + DATA_PAGE_INSERT_FRAGMENT_RECORD (7), /** */ - DATA_PAGE_REMOVE_RECORD, + DATA_PAGE_REMOVE_RECORD (8), /** */ - DATA_PAGE_SET_FREE_LIST_PAGE, + DATA_PAGE_SET_FREE_LIST_PAGE (9), /** */ - BTREE_META_PAGE_INIT_ROOT, + BTREE_META_PAGE_INIT_ROOT (10), /** */ - BTREE_META_PAGE_ADD_ROOT, + BTREE_META_PAGE_ADD_ROOT (11), /** */ - BTREE_META_PAGE_CUT_ROOT, + BTREE_META_PAGE_CUT_ROOT (12), /** */ - BTREE_INIT_NEW_ROOT, + BTREE_INIT_NEW_ROOT (13), /** */ - BTREE_PAGE_RECYCLE, + BTREE_PAGE_RECYCLE (14), /** */ - BTREE_PAGE_INSERT, + BTREE_PAGE_INSERT (15), /** */ - BTREE_FIX_LEFTMOST_CHILD, + BTREE_FIX_LEFTMOST_CHILD (16), /** */ - BTREE_FIX_COUNT, + BTREE_FIX_COUNT (17), /** */ - BTREE_PAGE_REPLACE, + BTREE_PAGE_REPLACE (18), /** */ - BTREE_PAGE_REMOVE, + BTREE_PAGE_REMOVE (19), /** */ - BTREE_PAGE_INNER_REPLACE, + BTREE_PAGE_INNER_REPLACE (20), /** */ - BTREE_FIX_REMOVE_ID, + BTREE_FIX_REMOVE_ID (21), /** */ - BTREE_FORWARD_PAGE_SPLIT, + BTREE_FORWARD_PAGE_SPLIT (22), /** */ - BTREE_EXISTING_PAGE_SPLIT, + BTREE_EXISTING_PAGE_SPLIT (23), /** */ - BTREE_PAGE_MERGE, + BTREE_PAGE_MERGE (24), /** */ - PAGES_LIST_SET_NEXT, + PAGES_LIST_SET_NEXT (25), /** */ - PAGES_LIST_SET_PREVIOUS, + PAGES_LIST_SET_PREVIOUS (26), /** */ - PAGES_LIST_INIT_NEW_PAGE, + PAGES_LIST_INIT_NEW_PAGE (27), /** */ - PAGES_LIST_ADD_PAGE, + PAGES_LIST_ADD_PAGE (28), /** */ - PAGES_LIST_REMOVE_PAGE, + PAGES_LIST_REMOVE_PAGE (29), /** */ - META_PAGE_INIT, + META_PAGE_INIT (30), /** */ - PARTITION_META_PAGE_UPDATE_COUNTERS, + PARTITION_META_PAGE_UPDATE_COUNTERS (31), /** Memory recovering start marker */ - MEMORY_RECOVERY, + MEMORY_RECOVERY (32), /** */ - TRACKING_PAGE_DELTA, + TRACKING_PAGE_DELTA (33), /** Meta page update last successful snapshot id. 
*/ - META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID (34), /** Meta page update last successful full snapshot id. */ - META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID (35), /** Meta page update next snapshot id. */ - META_PAGE_UPDATE_NEXT_SNAPSHOT_ID, + META_PAGE_UPDATE_NEXT_SNAPSHOT_ID (36), /** Meta page update last allocated index. */ - META_PAGE_UPDATE_LAST_ALLOCATED_INDEX, + META_PAGE_UPDATE_LAST_ALLOCATED_INDEX (37), /** Partition meta update state. */ - PART_META_UPDATE_STATE, + PART_META_UPDATE_STATE (38), /** Page list meta reset count record. */ - PAGE_LIST_META_RESET_COUNT_RECORD, + PAGE_LIST_META_RESET_COUNT_RECORD (39), /** Switch segment record. * Marker record for indicate end of segment. @@ -160,41 +160,74 @@ public enum RecordType { * that one byte in the end,then we write SWITCH_SEGMENT_RECORD as marker end of segment. * No need write CRC or WAL pointer for this record. It is byte marker record. * */ - SWITCH_SEGMENT_RECORD, + SWITCH_SEGMENT_RECORD (40), /** */ - DATA_PAGE_UPDATE_RECORD, + DATA_PAGE_UPDATE_RECORD (41), /** init */ - BTREE_META_PAGE_INIT_ROOT2, + BTREE_META_PAGE_INIT_ROOT2 (42), /** Partition destroy. */ - PARTITION_DESTROY, + PARTITION_DESTROY (43), /** Snapshot record. */ - SNAPSHOT, + SNAPSHOT (44), /** Metastore data record. */ - METASTORE_DATA_RECORD, + METASTORE_DATA_RECORD (45), /** Exchange record. */ - EXCHANGE, + EXCHANGE (46), /** Reserved for future record. */ - RESERVED, + RESERVED (47), /** Rollback tx record. */ - ROLLBACK_TX_RECORD, + ROLLBACK_TX_RECORD (57), /** */ - PARTITION_META_PAGE_UPDATE_COUNTERS_V2; + PARTITION_META_PAGE_UPDATE_COUNTERS_V2 (58), + + /** Init root meta page (with flags and created version) */ + BTREE_META_PAGE_INIT_ROOT_V3 (59); + + /** Index for serialization. Should be consistent throughout all versions. */ + private final int idx; + + /** + * @param idx Index for serialization. 
+ */ + RecordType(int idx) { + this.idx = idx; + } + + /** + * @return Index for serialization. + */ + public int index() { + return idx; + } /** */ - private static final RecordType[] VALS = RecordType.values(); + private static final RecordType[] VALS; + + static { + RecordType[] recordTypes = RecordType.values(); + + int maxIdx = 0; + for (RecordType recordType : recordTypes) + maxIdx = Math.max(maxIdx, recordType.idx); + + VALS = new RecordType[maxIdx + 1]; + + for (RecordType recordType : recordTypes) + VALS[recordType.idx] = recordType; + } /** */ - public static RecordType fromOrdinal(int ord) { - return ord < 0 || ord >= VALS.length ? null : VALS[ord]; + public static RecordType fromIndex(int idx) { + return idx < 0 || idx >= VALS.length ? null : VALS[idx]; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java new file mode 100644 index 0000000000000..1163f1f7e6d05 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.IgniteVersionUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteProductVersion; + +/** + * + */ +public class MetaPageInitRootInlineFlagsCreatedVersionRecord extends MetaPageInitRootInlineRecord { + /** Created version. */ + private final long flags; + + /** Created version. */ + private final IgniteProductVersion createdVer; + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize) { + super(grpId, pageId, rootId, inlineSize); + + createdVer = IgniteVersionUtils.VER; + flags = BPlusMetaIO.FLAGS_DEFAULT; + } + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + * @param flags Flags. + * @param createdVer The version of ignite that creates this tree. 
+ */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize, + long flags, IgniteProductVersion createdVer) { + super(grpId, pageId, rootId, inlineSize); + + this.flags = flags; + this.createdVer = createdVer; + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + BPlusMetaIO io = BPlusMetaIO.VERSIONS.forPage(pageAddr); + + io.initFlagsAndVersion(pageAddr, flags, createdVer); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.BTREE_META_PAGE_INIT_ROOT_V3; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageInitRootInlineFlagsCreatedVersionRecord.class, this, "super", super.toString()); + } + + /** + * @return Created version. + */ + public IgniteProductVersion createdVersion() { + return createdVer; + } + + /** + * @return Meta page flags. + */ + public long flags() { + return flags; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java index 2c02f26be6641..3b9119b6d5b92 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java @@ -117,10 +117,26 @@ public int minorTopologyVersion() { * @param upper Upper bound. * @return {@code True} if this topology version is within provided bounds (inclusive). */ - public boolean isBetween(AffinityTopologyVersion lower, AffinityTopologyVersion upper) { + public final boolean isBetween(AffinityTopologyVersion lower, AffinityTopologyVersion upper) { return compareTo(lower) >= 0 && compareTo(upper) <= 0; } + /** + * @param topVer Test version. 
+ * @return {@code True} if this topology happens strictly after than {@code topVer}. + */ + public final boolean after(AffinityTopologyVersion topVer) { + return compareTo(topVer) > 0; + } + + /** + * @param topVer Test version. + * @return {@code True} if this topology happens strictly before than {@code topVer}. + */ + public final boolean before(AffinityTopologyVersion topVer) { + return compareTo(topVer) < 0; + } + /** {@inheritDoc} */ @Override public void onAckReceived() { // No-op. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 3206bae3c100f..2ccf405c30480 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -931,8 +931,6 @@ public void onPartitionEvicted(int part) { cctx.dr().partitionEvicted(part); cctx.continuousQueries().onPartitionEvicted(part); - - cctx.dataStructures().onPartitionEvicted(part); } } @@ -1192,8 +1190,9 @@ public boolean globalWalEnabled() { */ public void globalWalEnabled(boolean enabled) { if (globalWalEnabled != enabled) { - log.info("Global WAL state for group=" + cacheOrGroupName() + - " changed from " + globalWalEnabled + " to " + enabled); + if (log.isInfoEnabled()) + log.info("Global WAL state for group=" + cacheOrGroupName() + + " changed from " + globalWalEnabled + " to " + enabled); persistGlobalWalState(enabled); @@ -1206,8 +1205,9 @@ public void globalWalEnabled(boolean enabled) { */ public void localWalEnabled(boolean enabled) { if (localWalEnabled != enabled){ - log.info("Local WAL state for group=" + cacheOrGroupName() + - " changed from " + localWalEnabled + " to " + enabled); + if (log.isInfoEnabled()) + log.info("Local WAL state for group=" + cacheOrGroupName() + + " changed from " + localWalEnabled + " 
to " + enabled); persistLocalWalState(enabled); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java index bb0b59bf9845e..c7a59a4606df7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java @@ -161,6 +161,13 @@ public boolean isStaticallyConfigured() { return staticallyConfigured; } + /** + * @return Long which bits represent some flags. + */ + public long getFlags() { + return flags; + } + /** * @param ois ObjectInputStream. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java index d37f69ca5f6ad..649b4d1ecd20f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java @@ -31,6 +31,7 @@ import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.lang.GridPlainRunnable; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.Nullable; @@ -257,7 +258,11 @@ private IgniteInternalFuture registerAllCachesAndGroups( if (cachesToPersist.isEmpty()) return cachesConfPersistFuture = new GridFinishedFuture<>(); - return cachesConfPersistFuture = persistCacheConfigurations(cachesToPersist); + List cacheConfigsToPersist = cacheDescriptors.stream() + .map(DynamicCacheDescriptor::toStoredData) + 
.collect(Collectors.toList()); + + return cachesConfPersistFuture = persistCacheConfigurations(cacheConfigsToPersist); } /** @@ -273,16 +278,12 @@ private boolean shouldPersist(CacheConfiguration cacheCfg) { } /** - * Persists cache configurations from given {@code cacheDescriptors}. + * Persists cache configurations. * - * @param cacheDescriptors Cache descriptors to retrieve cache configurations. + * @param cacheConfigsToPersist Cache configurations to persist. * @return Future that will be completed when all cache configurations will be persisted to cache work directory. */ - private IgniteInternalFuture persistCacheConfigurations(List cacheDescriptors) { - List cacheConfigsToPersist = cacheDescriptors.stream() - .map(DynamicCacheDescriptor::toStoredData) - .collect(Collectors.toList()); - + private IgniteInternalFuture persistCacheConfigurations(List cacheConfigsToPersist) { // Pre-create cache work directories if they don't exist. for (StoredCacheData data : cacheConfigsToPersist) { try { @@ -297,13 +298,15 @@ private IgniteInternalFuture persistCacheConfigurations(List { - try { - for (StoredCacheData data : cacheConfigsToPersist) - cctx.pageStore().storeCacheData(data, false); - } - catch (IgniteCheckedException e) { - U.error(log, "Error while saving cache configurations on disk", e); + return cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() { + @Override public void run() { + try { + for (StoredCacheData data : cacheConfigsToPersist) + cctx.cache().saveCacheConfiguration(data, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Error while saving cache configurations on disk", e); + } } }); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 81675bdd85111..a9d68aad0574e 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -33,6 +33,7 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.CacheExistsException; @@ -136,15 +137,13 @@ public ClusterCachesInfo(GridKernalContext ctx) { * Filters all dynamic cache descriptors and groups that were not presented on node start * and were received with grid discovery data. * - * @param localConfigData node's local cache configurations - * (both from static config and stored with persistent caches). - * + * @param localCachesOnStart Caches which were already presented on node start. */ - public void filterDynamicCacheDescriptors(CacheJoinNodeDiscoveryData localConfigData) { + public void filterDynamicCacheDescriptors(Set localCachesOnStart) { if (ctx.isDaemon()) return; - filterRegisteredCachesAndCacheGroups(localConfigData.caches()); + filterRegisteredCachesAndCacheGroups(localCachesOnStart); List> locJoinStartCaches = locJoinCachesCtx.caches(); @@ -163,14 +162,14 @@ public void filterDynamicCacheDescriptors(CacheJoinNodeDiscoveryData localConfig * * @param locCaches Caches from local node configuration (static configuration and persistent caches). 
*/ - private void filterRegisteredCachesAndCacheGroups(Map locCaches) { + private void filterRegisteredCachesAndCacheGroups(Set locCaches) { //filter registered caches Iterator> cachesIter = registeredCaches.entrySet().iterator(); while (cachesIter.hasNext()) { Map.Entry e = cachesIter.next(); - if (!locCaches.containsKey(e.getKey())) { + if (!locCaches.contains(e.getKey())) { cachesIter.remove(); ctx.discovery().removeCacheFilter(e.getKey()); @@ -1660,6 +1659,53 @@ else if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) } } + /** + * @param data Joining node data. + * @return Message with error or null if everything was OK. + */ + public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data) { + if (data.hasJoiningNodeData()) { + Serializable joiningNodeData = data.joiningNodeData(); + + if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) { + CacheJoinNodeDiscoveryData joinData = (CacheJoinNodeDiscoveryData)joiningNodeData; + + Set problemCaches = null; + + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { + CacheConfiguration cfg = cacheInfo.cacheData().config(); + + if (!registeredCaches.containsKey(cfg.getName())) { + String conflictErr = checkCacheConflict(cfg); + + if (conflictErr != null) { + U.warn(log, "Ignore cache received from joining node. " + conflictErr); + + continue; + } + + long flags = cacheInfo.getFlags(); + + if (flags == 1L) { + if (problemCaches == null) + problemCaches = new HashSet<>(); + + problemCaches.add(cfg.getName()); + } + } + } + + if (!F.isEmpty(problemCaches)) + return problemCaches.stream().collect(Collectors.joining(", ", + "Joining node has caches with data which are not presented on cluster, " + + "it could mean that they were already destroyed, to add the node to cluster - " + + "remove directories with the caches[", "]")); + } + } + + return null; + } + /** * @param clientData Discovery data. * @param clientNodeId Client node ID. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java index 268756238366b..3dbee2a9b140a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java @@ -394,7 +394,7 @@ public void receivedFrom(UUID nodeId) { /** * @return ID of node provided cache configuration in discovery data. */ - @Nullable public UUID receivedFrom() { + public @Nullable UUID receivedFrom() { return rcvdFrom; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index 7db59ca58c2fd..fa42caffda2a2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -678,14 +678,14 @@ public boolean isLocal() { * @return {@code True} if cache is replicated cache. */ public boolean isReplicated() { - return cacheCfg.getCacheMode() == CacheMode.REPLICATED; + return config().getCacheMode() == CacheMode.REPLICATED; } /** * @return {@code True} if cache is partitioned cache. */ public boolean isPartitioned() { - return cacheCfg.getCacheMode() == CacheMode.PARTITIONED; + return config().getCacheMode() == CacheMode.PARTITIONED; } /** @@ -699,7 +699,7 @@ public boolean isDrEnabled() { * @return {@code True} in case cache supports query. 
*/ public boolean isQueryEnabled() { - return !F.isEmpty(cacheCfg.getQueryEntities()); + return !F.isEmpty(config().getQueryEntities()); } /** @@ -824,7 +824,7 @@ public void checkSecurity(SecurityPermission op) throws SecurityException { if (CU.isSystemCache(name())) return; - ctx.security().authorize(name(), op, null); + ctx.security().authorize(name(), op); } /** @@ -852,14 +852,16 @@ public boolean rebalanceEnabled() { * @return {@code True} if atomic. */ public boolean atomic() { - return cacheCfg.getAtomicityMode() == ATOMIC; + return config().getAtomicityMode() == ATOMIC; } /** * @return {@code True} if transactional. */ public boolean transactional() { - return cacheCfg.getAtomicityMode() == TRANSACTIONAL; + CacheConfiguration cfg = config(); + + return cfg.getAtomicityMode() == TRANSACTIONAL; } /** @@ -1039,9 +1041,15 @@ public GridCacheAdapter cache() { /** * @return Cache configuration for given cache instance. + * @throws IllegalStateException If this cache context was cleaned up. */ public CacheConfiguration config() { - return cacheCfg; + CacheConfiguration res = cacheCfg; + + if (res == null) + throw new IllegalStateException((new CacheStoppedException(name()))); + + return res; } /** @@ -1050,7 +1058,7 @@ public CacheConfiguration config() { * are set to {@code true} or the store is local. */ public boolean writeToStoreFromDht() { - return store().isLocal() || cacheCfg.isWriteBehindEnabled(); + return store().isLocal() || config().isWriteBehindEnabled(); } /** @@ -1502,56 +1510,56 @@ public boolean deploymentEnabled() { * @return {@code True} if store read-through mode is enabled. */ public boolean readThrough() { - return cacheCfg.isReadThrough() && !skipStore(); + return config().isReadThrough() && !skipStore(); } /** * @return {@code True} if store and read-through mode are enabled in configuration. 
*/ public boolean readThroughConfigured() { - return store().configured() && cacheCfg.isReadThrough(); + return store().configured() && config().isReadThrough(); } /** * @return {@code True} if {@link CacheConfiguration#isLoadPreviousValue()} flag is set. */ public boolean loadPreviousValue() { - return cacheCfg.isLoadPreviousValue(); + return config().isLoadPreviousValue(); } /** * @return {@code True} if store write-through is enabled. */ public boolean writeThrough() { - return cacheCfg.isWriteThrough() && !skipStore(); + return config().isWriteThrough() && !skipStore(); } /** * @return {@code True} if invalidation is enabled. */ public boolean isInvalidate() { - return cacheCfg.isInvalidate(); + return config().isInvalidate(); } /** * @return {@code True} if synchronous commit is enabled. */ public boolean syncCommit() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if synchronous rollback is enabled. */ public boolean syncRollback() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if only primary node should be updated synchronously. */ public boolean syncPrimary() { - return cacheCfg.getWriteSynchronizationMode() == PRIMARY_SYNC; + return config().getWriteSynchronizationMode() == PRIMARY_SYNC; } /** @@ -1767,7 +1775,7 @@ public boolean keepBinary() { * of {@link CacheConfiguration#isCopyOnRead()}. 
*/ public boolean needValueCopy() { - return affNode && cacheCfg.isCopyOnRead(); + return affNode && config().isCopyOnRead(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java index 726a6c88c1804..c095ebe27fdc7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java @@ -20,6 +20,7 @@ import java.util.Collection; import java.util.UUID; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.events.CacheEvent; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -388,11 +389,18 @@ public boolean isRecordable(int type) { GridCacheContext cctx0 = cctx; // Event recording is impossible in recovery mode. - if (cctx0 != null && cctx0.kernalContext().recoveryMode()) + if (cctx0 == null || cctx0.kernalContext().recoveryMode()) return false; - return cctx0 != null && cctx0.userCache() && cctx0.gridEvents().isRecordable(type) - && !cctx0.config().isEventsDisabled(); + try { + CacheConfiguration cfg = cctx0.config(); + + return cctx0.userCache() && cctx0.gridEvents().isRecordable(type) && !cfg.isEventsDisabled(); + } + catch (IllegalStateException e) { + // Cache context was cleaned up. 
+ return false; + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java index 658ca2a8de039..7e1d867c4ec21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java @@ -133,6 +133,8 @@ public void leaveNoLock() { ctx.tm().resetContext(); ctx.mvcc().contextReset(); + ctx.tm().leaveNearTxSystemSection(); + // Unwind eviction notifications. if (!ctx.shared().closed(ctx)) CU.unwindEvicts(ctx); @@ -172,6 +174,8 @@ public void leave() { onEnter(); + ctx.tm().enterNearTxSystemSection(); + Lock lock = rwLock.readLock(); lock.lock(); @@ -239,6 +243,8 @@ public void leaveNoLock(CacheOperationContext prev) { // Unwind eviction notifications. CU.unwindEvicts(ctx); + ctx.tm().leaveNearTxSystemSection(); + // Return back previous thread local operation context per call. 
ctx.operationContextPerCall(prev); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 1fdbed59742e5..0e8488ae331d1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -96,6 +96,7 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; /** @@ -1124,8 +1125,6 @@ else if (interceptorVal != val0) null, topVer); } - - cctx.dataStructures().onEntryUpdated(key, false, keepBinary); } finally { unlockEntry(); @@ -1348,8 +1347,6 @@ else if (log.isDebugEnabled()) topVer); } - cctx.dataStructures().onEntryUpdated(key, true, keepBinary); - deferred = cctx.deferredDelete() && !detached() && !isInternal(); if (intercept) @@ -1730,8 +1727,6 @@ else if (ttl != CU.TTL_ZERO) onUpdateFinished(updateCntr); } - cctx.dataStructures().onEntryUpdated(key, op == DELETE, keepBinary); - if (intercept) { if (op == UPDATE) cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, updated, updated0, keepBinary, 0L)); @@ -2018,8 +2013,6 @@ else if (ttl != CU.TTL_ZERO) topVer); } - cctx.dataStructures().onEntryUpdated(key, c.op == DELETE, keepBinary); - if (intercept && c.wasIntercepted) { assert c.op == UPDATE || c.op == DELETE : c.op; @@ -2923,8 +2916,6 @@ else if (deletedUnlocked()) updateCntr, null, topVer); - - cctx.dataStructures().onEntryUpdated(key, false, false); } 
onUpdateFinished(updateCntr); @@ -3720,6 +3711,7 @@ protected boolean storeValue( GridCacheVersion ver, @Nullable IgnitePredicate predicate) throws IgniteCheckedException { assert lock.isHeldByCurrentThread(); + assert localPartition() == null || localPartition().state() != RENTING : localPartition(); UpdateClosure closure = new UpdateClosure(this, val, ver, expireTime, predicate); @@ -4273,6 +4265,18 @@ private void obsoleteVersionExtras(@Nullable GridCacheVersion obsoleteVer, GridC protected final void checkOwnerChanged(@Nullable CacheLockCandidates prevOwners, @Nullable CacheLockCandidates owners, CacheObject val) { + checkOwnerChanged(prevOwners, owners, val, null); + } + /** + * @param prevOwners Previous owners. + * @param owners Current owners. + * @param val Entry value. + * @param checkingCandidate flag to enable or disable check of candidate chain + */ + protected final void checkOwnerChanged(@Nullable CacheLockCandidates prevOwners, + @Nullable CacheLockCandidates owners, + CacheObject val, + CacheLockCandidates checkingCandidate) { assert !lock.isHeldByCurrentThread(); if (prevOwners != null && owners == null) { @@ -4308,7 +4312,8 @@ protected final void checkOwnerChanged(@Nullable CacheLockCandidates prevOwners, if (locked) { cctx.mvcc().callback().onOwnerChanged(this, owner); - if (owner.local()) + if (owner.local() + && (checkingCandidate == null || !checkingCandidate.hasCandidate(owner.version()))) checkThreadChain(owner); if (cctx.events().isRecordable(EVT_CACHE_OBJECT_LOCKED)) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java index 108bc21cf1d0f..1dcdf013dbc62 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCachePartitionExchangeManager.java @@ -96,6 +96,7 @@ import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.ExchangeLatchManager; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.processors.cache.persistence.snapshot.IgniteCacheSnapshotManager; import org.apache.ignite.internal.processors.cache.persistence.snapshot.SnapshotDiscoveryMessage; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -107,6 +108,7 @@ import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.util.GridListSet; import org.apache.ignite.internal.util.GridPartitionStateMap; +import org.apache.ignite.internal.util.GridStringBuilder; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -1979,6 +1981,41 @@ public void dumpDebugInfo(@Nullable GridDhtPartitionsExchangeFuture exchFut) thr diagCtx.send(cctx.kernalContext(), null); } + /** + * Builds warning string for long running transaction. + * + * @param tx Transaction. + * @param curTime Current timestamp. + * @return Warning string. 
+ */ + private String longRunningTransactionWarning(IgniteInternalTx tx, long curTime) { + GridStringBuilder warning = new GridStringBuilder() + .a(">>> Transaction [startTime=") + .a(formatTime(tx.startTime())) + .a(", curTime=") + .a(formatTime(curTime)); + + if (tx instanceof GridNearTxLocal) { + GridNearTxLocal nearTxLoc = (GridNearTxLocal)tx; + + long sysTimeCurr = nearTxLoc.systemTimeCurrent(); + + //in some cases totalTimeMillis can be less than systemTimeMillis, as they are calculated with different precision + long userTime = Math.max(curTime - nearTxLoc.startTime() - sysTimeCurr, 0); + + warning.a(", systemTime=") + .a(sysTimeCurr) + .a(", userTime=") + .a(userTime); + } + + warning.a(", tx=") + .a(tx) + .a("]"); + + return warning.toString(); + } + /** * @param timeout Operation timeout. * @return {@code True} if found long running operations. @@ -2005,8 +2042,7 @@ private boolean dumpLongRunningOperations0(long timeout) { found = true; if (warnings.canAddMessage()) { - warnings.add(">>> Transaction [startTime=" + formatTime(tx.startTime()) + - ", curTime=" + formatTime(curTime) + ", tx=" + tx + ']'); + warnings.add(longRunningTransactionWarning(tx, curTime)); if (ltrDumpLimiter.allowAction(tx)) dumpLongRunningTransaction(tx); @@ -3044,7 +3080,7 @@ else if (task instanceof ForceRebalanceExchangeTask) { ? 
Math.min(curTimeout, dumpTimeout) : dumpTimeout; - blockingSectionEnd(); + blockingSectionBegin(); try { resVer = exchFut.get(exchTimeout, TimeUnit.MILLISECONDS); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java index 429b12090b97a..8eed3a143957b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProcessor.java @@ -21,11 +21,9 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; -import java.util.Deque; import java.util.HashMap; import java.util.HashSet; import java.util.IdentityHashMap; -import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Map; @@ -35,6 +33,8 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Supplier; import java.util.stream.Collectors; import javax.management.MBeanServer; import org.apache.ignite.IgniteCheckedException; @@ -80,7 +80,6 @@ import org.apache.ignite.internal.processors.GridProcessorAdapter; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; -import org.apache.ignite.internal.processors.cache.CacheJoinNodeDiscoveryData.CacheInfo; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; import org.apache.ignite.internal.processors.cache.datastructures.CacheDataStructuresManager; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCache; @@ -135,6 +134,7 @@ import 
org.apache.ignite.internal.processors.query.schema.SchemaNodeLeaveExchangeWorkerTask; import org.apache.ignite.internal.processors.query.schema.message.SchemaAbstractDiscoveryMessage; import org.apache.ignite.internal.processors.query.schema.message.SchemaProposeDiscoveryMessage; +import org.apache.ignite.internal.processors.security.OperationSecurityContext; import org.apache.ignite.internal.processors.security.SecurityContext; import org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.suggestions.GridPerformanceSuggestions; @@ -158,6 +158,7 @@ import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.lang.IgniteRunnable; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.lifecycle.LifecycleAware; import org.apache.ignite.marshaller.Marshaller; @@ -190,11 +191,13 @@ import static org.apache.ignite.configuration.DeploymentMode.SHARED; import static org.apache.ignite.internal.GridComponent.DiscoveryDataExchangeType.CACHE_PROC; import static org.apache.ignite.internal.IgniteComponentType.JTA; +import static org.apache.ignite.internal.IgniteFeatures.LRT_SYSTEM_USER_TIME_DUMP_SETTINGS; import static org.apache.ignite.internal.IgniteFeatures.TRANSACTION_OWNER_THREAD_DUMP_PROVIDING; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_CONSISTENCY_CHECK_SKIPPED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_TX_CONFIG; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isNearEnabled; import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isPersistentCache; +import static org.apache.ignite.internal.processors.security.SecurityUtils.nodeSecurityContext; import static org.apache.ignite.internal.util.IgniteUtils.doInParallel; /** @@ -211,9 +214,12 @@ public class GridCacheProcessor extends GridProcessorAdapter { "(the config of the cache 
'%s' has to be merged which is impossible on active grid). " + "Deactivate grid and retry node join or clean the joining node."; /** */ - private final boolean startClientCaches = - IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_START_CACHES_ON_JOIN, false); + private static final String CACHE_NAME_AND_OPERATION_FORMAT = "[cacheName=%s, operation=%s]"; + /** */ + private static final String CACHE_NAMES_AND_OPERATION_FORMAT = "[cacheNames=%s, operation=%s]"; + + /** */ private final boolean walFsyncWithDedicatedWorker = IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER, false); @@ -231,6 +237,9 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** */ private final ConcurrentMap cacheGrps = new ConcurrentHashMap<>(); + /** Flag that caches were already filtered out. */ + private final AtomicBoolean alreadyFiltered = new AtomicBoolean(); + /** */ private final Map> caches; @@ -240,9 +249,6 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** Map of proxies. */ private final ConcurrentHashMap> jCacheProxies; - /** Caches stop sequence. */ - private final Deque stopSeq; - /** Transaction interface implementation. */ private IgniteTransactionsImpl transactions; @@ -262,6 +268,9 @@ public class GridCacheProcessor extends GridProcessorAdapter { /** */ private ClusterCachesInfo cachesInfo; + /** */ + private GridLocalConfigManager locCfgMgr; + /** */ private IdentityHashMap sesHolders = new IdentityHashMap<>(); @@ -297,24 +306,11 @@ public GridCacheProcessor(GridKernalContext ctx) { caches = new ConcurrentHashMap<>(); jCacheProxies = new ConcurrentHashMap<>(); - stopSeq = new LinkedList<>(); internalCaches = new HashSet<>(); marsh = MarshallerUtils.jdkMarshaller(ctx.igniteInstanceName()); } - /** - * @param cfg Initializes cache configuration with proper defaults. - * @param cacheObjCtx Cache object context. - * @throws IgniteCheckedException If configuration is not valid. 
- */ - private void initialize(CacheConfiguration cfg, CacheObjectContext cacheObjCtx) - throws IgniteCheckedException { - CU.initializeConfigDefaults(log, cfg, cacheObjCtx); - - ctx.igfsHelper().preProcessCacheConfiguration(cfg); - } - /** * @param cfg Configuration to check for possible performance issues. * @param hasStore {@code True} if store is configured. @@ -710,31 +706,6 @@ private void cleanup(CacheConfiguration cfg, @Nullable Object rsrc, boolean near } } - /** - * @throws IgniteCheckedException If failed. - */ - private void restoreCacheConfigurations() throws IgniteCheckedException { - if (ctx.isDaemon()) - return; - - Map caches = new HashMap<>(); - - Map templates = new HashMap<>(); - - addCacheOnJoinFromConfig(caches, templates); - - CacheJoinNodeDiscoveryData discoData = new CacheJoinNodeDiscoveryData( - IgniteUuid.randomUuid(), - caches, - templates, - startAllCachesOnClientStart() - ); - - localConfigs = discoData; - - cachesInfo.onStart(discoData); - } - /** {@inheritDoc} */ @SuppressWarnings({"unchecked"}) @Override public void start() throws IgniteCheckedException { @@ -758,14 +729,20 @@ private void restoreCacheConfigurations() throws IgniteCheckedException { sharedCtx = createSharedContext(ctx, sessionListeners); + locCfgMgr = new GridLocalConfigManager(this, ctx); + transactions = new IgniteTransactionsImpl(sharedCtx, null); // Start shared managers. 
for (GridCacheSharedManager mgr : sharedCtx.managers()) mgr.start(sharedCtx); - if (!ctx.isDaemon() && (!CU.isPersistenceEnabled(ctx.config())) || ctx.config().isClientMode()) - restoreCacheConfigurations(); + if (!ctx.isDaemon() && (!CU.isPersistenceEnabled(ctx.config())) || ctx.config().isClientMode()) { + CacheJoinNodeDiscoveryData data = locCfgMgr.restoreCacheConfigurations(); + + if (data != null) + cachesInfo.onStart(data); + } if (log.isDebugEnabled()) log.debug("Started cache processor."); @@ -774,123 +751,16 @@ private void restoreCacheConfigurations() throws IgniteCheckedException { ctx.authentication().cacheProcessorStarted(); } - /** - * @param cfg Cache configuration. - * @param sql SQL flag. - * @param caches Caches map. - * @param templates Templates map. - * @throws IgniteCheckedException If failed. - */ - private void addCacheOnJoin(CacheConfiguration cfg, boolean sql, - Map caches, - Map templates) throws IgniteCheckedException { - String cacheName = cfg.getName(); - - CU.validateCacheName(cacheName); - - cloneCheckSerializable(cfg); - - CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg); - - // Initialize defaults. - initialize(cfg, cacheObjCtx); - - StoredCacheData cacheData = new StoredCacheData(cfg); - - cacheData.sql(sql); - - if (GridCacheUtils.isCacheTemplateName(cacheName)) - templates.put(cacheName, new CacheInfo(cacheData, CacheType.USER, false, 0, true)); - else { - if (caches.containsKey(cacheName)) { - throw new IgniteCheckedException("Duplicate cache name found (check configuration and " + - "assign unique name to each cache): " + cacheName); - } - - CacheType cacheType = cacheType(cacheName); - - if (cacheType != CacheType.USER && cfg.getDataRegionName() == null) - cfg.setDataRegionName(sharedCtx.database().systemDateRegionName()); - - addStoredCache(caches, cacheData, cacheName, cacheType, true); - } - } - - /** - * Add stored cache data to caches storage. - * - * @param caches Cache storage. 
- * @param cacheData Cache data to add. - * @param cacheName Cache name. - * @param cacheType Cache type. - * @param isStaticalyConfigured Statically configured flag. - */ - private void addStoredCache(Map caches, StoredCacheData cacheData, String cacheName, - CacheType cacheType, boolean isStaticalyConfigured) { - if (!caches.containsKey(cacheName)) { - if (!cacheType.userCache()) - stopSeq.addLast(cacheName); - else - stopSeq.addFirst(cacheName); - } - - caches.put(cacheName, new CacheInfo(cacheData, cacheType, cacheData.sql(), 0, isStaticalyConfigured)); - } /** - * @param caches Caches map. - * @param templates Templates map. - * @throws IgniteCheckedException If failed. + * @param cfg Initializes cache configuration with proper defaults. + * @param cacheObjCtx Cache object context. + * @throws IgniteCheckedException If configuration is not valid. */ - private void addCacheOnJoinFromConfig( - Map caches, - Map templates - ) throws IgniteCheckedException { - assert !ctx.config().isDaemon(); - - CacheConfiguration[] cfgs = ctx.config().getCacheConfiguration(); - - for (int i = 0; i < cfgs.length; i++) { - CacheConfiguration cfg = new CacheConfiguration(cfgs[i]); - - // Replace original configuration value. - cfgs[i] = cfg; - - addCacheOnJoin(cfg, false, caches, templates); - } - - if (CU.isPersistenceEnabled(ctx.config()) && ctx.cache().context().pageStore() != null) { - Map storedCaches = ctx.cache().context().pageStore().readCacheConfigurations(); - - if (!F.isEmpty(storedCaches)) { - List skippedConfigs = new ArrayList<>(); - - for (StoredCacheData storedCacheData : storedCaches.values()) { - String cacheName = storedCacheData.config().getName(); - - CacheType type = cacheType(cacheName); - - if (!caches.containsKey(cacheName)) - // No static cache - add the configuration. - addStoredCache(caches, storedCacheData, cacheName, type, false); - else { - // A static cache with the same name already exists. 
- if (!keepStaticCacheConfiguration) { - addStoredCache(caches, storedCacheData, cacheName, type, false); - - if (type == CacheType.USER) - skippedConfigs.add(cacheName); - } - } - } + void initialize(CacheConfiguration cfg, CacheObjectContext cacheObjCtx) throws IgniteCheckedException { + CU.initializeConfigDefaults(log, cfg, cacheObjCtx); - if (!F.isEmpty(skippedConfigs)) - U.warn(log, "Static configuration for the following caches will be ignored because a persistent " + - "cache with the same name already exist (see " + - "https://apacheignite.readme.io/docs/cache-configuration for more information): " + - skippedConfigs); - } - } + ctx.igfsHelper().preProcessCacheConfiguration(cfg); } /** @@ -1041,7 +911,7 @@ private void checkConsistency() throws IgniteCheckedException { * @param cancel Cancel. */ public void stopCaches(boolean cancel) { - for (String cacheName : stopSeq) { + for (String cacheName : locCfgMgr.stopSequence()) { GridCacheAdapter cache = stoppedCaches.remove(cacheName); if (cache != null) @@ -1109,7 +979,7 @@ public void onKernalStopCaches(boolean cancel) { aff.cancelFutures(affErr); } - for (String cacheName : stopSeq) { + for (String cacheName : locCfgMgr.stopSequence()) { GridCacheAdapter cache = caches.remove(cacheName); if (cache != null) { @@ -1312,16 +1182,26 @@ private void stopCache(GridCacheAdapter cache, boolean cancel, boolean des U.stopLifecycleAware(log, lifecycleAwares(ctx.group(), cache.configuration(), ctx.store().configuredStore())); - IgnitePageStoreManager pageStore; + try { + IgniteWriteAheadLogManager wal; - if (destroy && (pageStore = sharedCtx.pageStore()) != null) { - try { + if ((wal = sharedCtx.wal()) != null) + wal.flush(null, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to flush WAL data while destroying cache" + + "[cache=" + ctx.name() + "]", e); + } + + try { + IgnitePageStoreManager pageStore; + + if (destroy && (pageStore = sharedCtx.pageStore()) != null) pageStore.removeCacheData(new 
StoredCacheData(ctx.config())); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to delete cache configuration data while destroying cache" + - "[cache=" + ctx.name() + "]", e); - } + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to delete cache configuration data while destroying cache" + + "[cache=" + ctx.name() + "]", e); } if (log.isInfoEnabled()) { @@ -1899,10 +1779,9 @@ public CacheMode cacheMode(String cacheName) { * @return Caches to be started when this node starts. */ @Nullable public LocalJoinCachesContext localJoinCachesContext() { - if (ctx.discovery().localNode().order() == 1 && localConfigs != null) - cachesInfo.filterDynamicCacheDescriptors(localConfigs); - - localConfigs = null; + if (ctx.discovery().localNode().order() == 1 && alreadyFiltered.compareAndSet(false, true)) { + cachesInfo.filterDynamicCacheDescriptors(locCfgMgr.localCachesOnStart()); + } return cachesInfo.localJoinCachesContext(); } @@ -2900,6 +2779,8 @@ private void processCacheStopRequestOnExchangeDone(ExchangeActions exchActions) for (ExchangeActions.CacheActionData action: cachesToStopByGrp.getValue()) { stopGateway(action.request()); + context().tm().rollbackTransactionsForStoppingCache(action.descriptor().cacheId()); + sharedCtx.database().checkpointReadLock(); try { @@ -3012,39 +2893,10 @@ public void onExchangeDone( ctx.service().updateUtilityCache(); } - rollbackCoveredTx(exchActions); - if (err == null) processCacheStopRequestOnExchangeDone(exchActions); } - /** - * Rollback tx covered by stopped caches. - * - * @param exchActions Change requests. 
- */ - private void rollbackCoveredTx(ExchangeActions exchActions) { - if (!exchActions.cacheGroupsToStop().isEmpty() || !exchActions.cacheStopRequests().isEmpty()) { - Set cachesToStop = new HashSet<>(); - - for (ExchangeActions.CacheGroupActionData act : exchActions.cacheGroupsToStop()) { - @Nullable CacheGroupContext grpCtx = context().cache().cacheGroup(act.descriptor().groupId()); - - if (grpCtx != null && grpCtx.sharedGroup()) - cachesToStop.addAll(grpCtx.cacheIds()); - } - - for (ExchangeActions.CacheActionData act : exchActions.cacheStopRequests()) - cachesToStop.add(act.descriptor().cacheId()); - - if (!cachesToStop.isEmpty()) { - IgniteTxManager tm = context().tm(); - - tm.rollbackTransactionsForCaches(cachesToStop); - } - } - } - /** * @param grpId Group ID. */ @@ -3218,12 +3070,17 @@ private GridCacheSharedContext createSharedContext( } /** {@inheritDoc} */ - @Nullable @Override public IgniteNodeValidationResult validateNode( + @Override public @Nullable IgniteNodeValidationResult validateNode( ClusterNode node, JoiningNodeDiscoveryData discoData ) { if(!cachesInfo.isMergeConfigSupports(node)) return null; + String validationRes = cachesInfo.validateJoiningNodeData(discoData); + + if (validationRes != null) + return new IgniteNodeValidationResult(node.id(), validationRes, validationRes); + if (discoData.hasJoiningNodeData() && discoData.joiningNodeData() instanceof CacheJoinNodeDiscoveryData) { CacheJoinNodeDiscoveryData nodeData = (CacheJoinNodeDiscoveryData)discoData.joiningNodeData(); @@ -3231,22 +3088,28 @@ private GridCacheSharedContext createSharedContext( StringBuilder errorMessage = new StringBuilder(); - for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : nodeData.caches().values()) { - try { - byte[] secCtxBytes = node.attribute(IgniteNodeAttributes.ATTR_SECURITY_SUBJECT_V2); + SecurityContext secCtx = null; - if (secCtxBytes != null) { - SecurityContext secCtx = U.unmarshal(marsh, secCtxBytes, U.resolveClassLoader(ctx.config())); + if 
(ctx.security().enabled()) { + try { + secCtx = nodeSecurityContext(marsh, U.resolveClassLoader(ctx.config()), node); + } + catch (SecurityException se) { + errorMessage.append(se.getMessage()); + } + } - if (secCtx != null && cacheInfo.cacheType() == CacheType.USER) - authorizeCacheCreate(cacheInfo.cacheData().config(), secCtx); + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : nodeData.caches().values()) { + if (secCtx != null && cacheInfo.cacheType() == CacheType.USER) { + try (OperationSecurityContext s = ctx.security().withContext(secCtx)) { + authorizeCacheCreate(cacheInfo.cacheData().config()); } - } - catch (SecurityException | IgniteCheckedException ex) { - if (errorMessage.length() > 0) - errorMessage.append("\n"); + catch (SecurityException ex) { + if (errorMessage.length() > 0) + errorMessage.append("\n"); - errorMessage.append(ex.getMessage()); + errorMessage.append(ex.getMessage()); + } } DynamicCacheDescriptor localDesc = cacheDescriptor(cacheInfo.cacheData().config().getName()); @@ -3460,13 +3323,6 @@ private void stopCachesOnClientReconnect(Collection stoppedCac } } - /** - * @return {@code True} if need locally start all existing caches on client node start. - */ - private boolean startAllCachesOnClientStart() { - return startClientCaches && ctx.clientNode(); - } - /** * Dynamically starts cache using template configuration. 
* @@ -3691,8 +3547,10 @@ public IgniteInternalFuture dynamicStartCache( ) { assert cacheName != null; - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, + "dynamicStartCache")); + } try { DynamicCacheChangeRequest req = prepareCacheChangeRequest( @@ -3786,8 +3644,16 @@ public IgniteInternalFuture dynamicStartCachesByStoredConf( boolean disabledAfterStart, IgniteUuid restartId ) { - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> { + List cacheNames = storedCacheDataList.stream() + .map(StoredCacheData::config) + .map(CacheConfiguration::getName) + .collect(Collectors.toList()); + + return String.format(CACHE_NAMES_AND_OPERATION_FORMAT, cacheNames, "dynamicStartCachesByStoredConf"); + }); + } List srvReqs = null; Map clientReqs = null; @@ -3879,8 +3745,10 @@ public IgniteInternalFuture dynamicDestroyCache( ) { assert cacheName != null; - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, + "dynamicDestroyCache")); + } DynamicCacheChangeRequest req = DynamicCacheChangeRequest.stopRequest(ctx, cacheName, sql, true); @@ -3912,8 +3780,10 @@ public IgniteInternalFuture dynamicDestroyCaches( boolean checkThreadTx, boolean destroy ) { - if (checkThreadTx) - checkEmptyTransactions(); + if (checkThreadTx) { + checkEmptyTransactionsEx(() -> String.format(CACHE_NAMES_AND_OPERATION_FORMAT, cacheNames, + "dynamicDestroyCaches")); + } List reqs = new ArrayList<>(cacheNames.size()); @@ -3999,7 +3869,7 @@ IgniteInternalFuture dynamicCloseCache(String cacheName) { if (proxy == null || proxy.isProxyClosed()) return new GridFinishedFuture<>(); // No-op. 
- checkEmptyTransactions(); + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, cacheName, "dynamicCloseCache")); if (proxy.context().isLocal()) return dynamicDestroyCache(cacheName, false, true, false, null); @@ -4014,11 +3884,14 @@ IgniteInternalFuture dynamicCloseCache(String cacheName) { * @return Future that will be completed when state is changed for all caches. */ public IgniteInternalFuture resetCacheState(Collection cacheNames) { - checkEmptyTransactions(); - if (F.isEmpty(cacheNames)) cacheNames = cachesInfo.registeredCaches().keySet(); + Collection forCheckCacheNames = cacheNames; + + checkEmptyTransactionsEx(() -> String.format(CACHE_NAME_AND_OPERATION_FORMAT, forCheckCacheNames, + "resetCacheState")); + Collection reqs = new ArrayList<>(cacheNames.size()); for (String cacheName : cacheNames) { @@ -4068,9 +3941,19 @@ else if (DataStructuresProcessor.isDataStructureCache(cacheName)) public void saveCacheConfiguration(DynamicCacheDescriptor desc) throws IgniteCheckedException { assert desc != null; - if (sharedCtx.pageStore() != null && !sharedCtx.kernalContext().clientNode() && - isPersistentCache(desc.cacheConfiguration(), sharedCtx.gridConfig().getDataStorageConfiguration())) - sharedCtx.pageStore().storeCacheData(desc.toStoredData(), true); + locCfgMgr.saveCacheConfiguration(desc.toStoredData(), true); + } + + /** + * Save cache configuration to persistent store if necessary. + * + * @param storedCacheData Stored cache data. + * @param overwrite Overwrite existing. + */ + public void saveCacheConfiguration(StoredCacheData storedCacheData, boolean overwrite) throws IgniteCheckedException { + assert storedCacheData != null; + + locCfgMgr.saveCacheConfiguration(storedCacheData, overwrite); } /** @@ -4169,28 +4052,28 @@ private Collection initiateCacheChanges( * Authorize creating cache. * * @param cfg Cache configuration. - * @param secCtx Optional security context. 
*/ - private void authorizeCacheCreate(CacheConfiguration cfg, SecurityContext secCtx) { - ctx.security().authorize(null, SecurityPermission.CACHE_CREATE, secCtx); + void authorizeCacheCreate(CacheConfiguration cfg) { + if(cfg != null) { + ctx.security().authorize(cfg.getName(), SecurityPermission.CACHE_CREATE); - if (cfg != null && cfg.isOnheapCacheEnabled() && - IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DISABLE_ONHEAP_CACHE)) - throw new SecurityException("Authorization failed for enabling on-heap cache."); + if (cfg.isOnheapCacheEnabled() && + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_DISABLE_ONHEAP_CACHE)) + throw new SecurityException("Authorization failed for enabling on-heap cache."); + } } /** - * Authorize dynamic cache management for this node. + * Authorize dynamic cache management. * * @param req start/stop cache request. */ private void authorizeCacheChange(DynamicCacheChangeRequest req) { - // Null security context means authorize this node. if (req.cacheType() == null || req.cacheType() == CacheType.USER) { if (req.stop()) - ctx.security().authorize(null, SecurityPermission.CACHE_DESTROY, null); + ctx.security().authorize(req.cacheName(), SecurityPermission.CACHE_DESTROY); else - authorizeCacheCreate(req.startCacheConfiguration(), null); + authorizeCacheCreate(req.startCacheConfiguration()); } } @@ -4610,23 +4493,16 @@ public IgniteInternalCache getOrStartCache( * @return All configured cache instances. */ public Collection> caches() { - return F.viewReadOnly(jCacheProxies.values(), new IgniteClosure, - IgniteInternalCache>() { - @Override public IgniteInternalCache apply(IgniteCacheProxy entries) { - return entries.internalProxy(); - } - }); + return F.viewReadOnly(jCacheProxies.values(), + (IgniteClosure, IgniteInternalCache>)IgniteCacheProxy::internalProxy); } /** * @return All configured cache instances. 
*/ public Collection> jcaches() { - return F.viewReadOnly(jCacheProxies.values(), new IgniteClosure, IgniteCacheProxy>() { - @Override public IgniteCacheProxy apply(IgniteCacheProxyImpl proxy) { - return proxy.gatewayWrapper(); - } - }); + return F.viewReadOnly(jCacheProxies.values(), + (IgniteClosure, IgniteCacheProxy>)IgniteCacheProxyImpl::gatewayWrapper); } /** @@ -4718,7 +4594,7 @@ public IgniteCacheProxy publicJCache(String cacheName) throws Ignit * @throws IgniteCheckedException If failed. */ @SuppressWarnings({"unchecked", "ConstantConditions"}) - @Nullable public IgniteCacheProxy publicJCache(String cacheName, + public @Nullable IgniteCacheProxy publicJCache(String cacheName, boolean failIfNotStarted, boolean checkThreadTx) throws IgniteCheckedException { assert cacheName != null; @@ -4744,10 +4620,13 @@ public IgniteCacheProxy publicJCache(String cacheName) throws Ignit } /** - * Get configuration for the given cache. + * Get configuration for the given cache. Fails if cache does not exist or restarting. * * @param name Cache name. * @return Cache configuration. + * @throws org.apache.ignite.IgniteCacheRestartingException If the cache with the given name + * is currently restarting. + * @throws IllegalStateException If the cache with the given name does not exist. */ public CacheConfiguration cacheConfiguration(String name) { assert name != null; @@ -4772,6 +4651,20 @@ public CacheConfiguration cacheConfiguration(String name) { return desc.cacheConfiguration(); } + /** + * Get configuration for the given cache. If a cache with the given name does not exist, will return {@code null}. + * + * @param name Cache name. + * @return Cache configuration or {@code null}. + */ + public CacheConfiguration cacheConfigurationNoProxyCheck(String name) { + assert name != null; + + DynamicCacheDescriptor desc = cacheDescriptor(name); + + return desc == null ? null : desc.cacheConfiguration(); + } + /** * Get registered cache descriptor. 
* @@ -4820,7 +4713,7 @@ public Map cacheGroupDescriptors() { * @param cacheId Cache ID. * @return Cache descriptor. */ - @Nullable public DynamicCacheDescriptor cacheDescriptor(int cacheId) { + public @Nullable DynamicCacheDescriptor cacheDescriptor(int cacheId) { for (DynamicCacheDescriptor cacheDesc : cacheDescriptors().values()) { CacheConfiguration ccfg = cacheDesc.cacheConfiguration(); @@ -4917,7 +4810,7 @@ public IgniteCacheProxy jcache(String name) { * @param awaitInit Await proxy initialization. * @return Cache proxy. */ - @Nullable public IgniteCacheProxyImpl jcacheProxy(String name, boolean awaitInit) { + public @Nullable IgniteCacheProxyImpl jcacheProxy(String name, boolean awaitInit) { IgniteCacheProxyImpl cache = jCacheProxies.get(name); if (awaitInit) @@ -4931,7 +4824,7 @@ public IgniteCacheProxy jcache(String name) { * @param proxy Cache proxy. * @return Previous cache proxy. */ - @Nullable public IgniteCacheProxyImpl addjCacheProxy(String name, IgniteCacheProxyImpl proxy) { + public @Nullable IgniteCacheProxyImpl addjCacheProxy(String name, IgniteCacheProxyImpl proxy) { return jCacheProxies.putIfAbsent(name, proxy); } @@ -5196,12 +5089,29 @@ public void checkEmptyTransactions() throws IgniteException { throw new IgniteException("Cannot start/stop cache within lock or transaction."); } + /** + * Method invoke {@link #checkEmptyTransactions()} and add message in case exception. + * + * @param eMsgSupplier supplier additional text message + * @throws IgniteException If {@link #checkEmptyTransactions()} throw {@link IgniteException} + * */ + private void checkEmptyTransactionsEx(final Supplier eMsgSupplier) throws IgniteException { + assert eMsgSupplier != null; + + try { + checkEmptyTransactions(); + } + catch (IgniteException e) { + throw new IgniteException(e.getMessage() + ' ' + eMsgSupplier.get(), e); + } + } + /** * @param val Object to check. * @return Configuration copy. * @throws IgniteCheckedException If validation failed. 
*/ - private CacheConfiguration cloneCheckSerializable(final CacheConfiguration val) throws IgniteCheckedException { + CacheConfiguration cloneCheckSerializable(final CacheConfiguration val) throws IgniteCheckedException { if (val == null) return null; @@ -5528,6 +5438,71 @@ public void setTxOwnerDumpRequestsAllowed(boolean allowed) { compute.broadcast(new TxOwnerDumpRequestAllowedSettingClosure(allowed)); } + /** + * Sets threshold timeout in milliseconds for long transactions, if transaction exceeds it, + * it will be dumped in log with information about how much time did + * it spent in system time (time while aquiring locks, preparing, commiting, etc.) + * and user time (time when client node runs some code while holding transaction). + * Can be set to 0 - no transactions will be dumped in log in this case. + * + * @param threshold Threshold timeout in milliseconds. + */ + public void longTransactionTimeDumpThreshold(long threshold) { + assert threshold >= 0 : "Threshold timeout must be greater than or equal to 0."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(threshold, null, null), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Sets the coefficient for samples of long running transactions that will be dumped in log, if + * {@link #longTransactionTimeDumpThreshold} is set to non-zero value." + * + * @param coefficient Coefficient, must be value between 0.0 and 1.0 inclusively. + */ + public void transactionTimeDumpSamplesCoefficient(double coefficient) { + assert coefficient >= 0.0 && coefficient <= 1.0 : "Percentage value must be between 0.0 and 1.0 inclusively."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(null, coefficient, null), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Sets the limit of samples of completed transactions that will be dumped in log per second, + * if {@link #transactionTimeDumpSamplesCoefficient} is above 0.0. 
+ * Must be integer value greater than 0. + * + * @param limit Limit value. + */ + public void longTransactionTimeDumpSamplesPerSecondLimit(int limit) { + assert limit > 0 : "Limit value must be greater than 0."; + + broadcastToNodesSupportingFeature( + new LongRunningTxTimeDumpSettingsClosure(null, null, limit), + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS + ); + } + + /** + * Broadcasts given job to nodes that support ignite feature. + * + * @param job Ignite job. + * @param feature Ignite feature. + */ + private void broadcastToNodesSupportingFeature(IgniteRunnable job, IgniteFeatures feature) { + ClusterGroup grp = ctx.grid() + .cluster() + .forPredicate(node -> IgniteFeatures.nodeSupports(node, feature)); + + IgniteCompute compute = ctx.grid().compute(grp); + + compute.broadcast(job); + } + /** * Recovery lifecycle for caches. */ @@ -5537,7 +5512,9 @@ private class CacheRecoveryLifecycle implements MetastorageLifecycleListener, Da /** {@inheritDoc} */ @Override public void onReadyForRead(ReadOnlyMetastorage metastorage) throws IgniteCheckedException { - restoreCacheConfigurations(); + CacheJoinNodeDiscoveryData data = locCfgMgr.restoreCacheConfigurations(); + + cachesInfo.onStart(data); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java index efa844de3df06..8a78dc15a8b69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheProxyImpl.java @@ -102,7 +102,11 @@ public GridCacheProxyImpl( gate = ctx.gate(); - aff = new GridCacheAffinityProxy<>(ctx, ctx.cache().affinity()); + GridCacheAdapter adapter = ctx.cache(); + if (adapter == null) + throw new IllegalStateException(new CacheStoppedException(ctx.name())); + + aff = new GridCacheAffinityProxy<>(ctx, 
adapter.affinity()); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java index 8a643369d983d..171fa642f925c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheUtils.java @@ -813,6 +813,13 @@ public static GridNearTxLocal txStartInternal(GridCacheContext ctx, IgniteIntern return prj.txStartEx(concurrency, isolation); } + /** + * Alias for {@link #txString(IgniteInternalTx)}. + */ + public static String txDump(@Nullable IgniteInternalTx tx) { + return txString(tx); + } + /** * @param tx Transaction. * @return String view of all safe-to-print transaction properties. @@ -832,6 +839,7 @@ public static String txString(@Nullable IgniteInternalTx tx) { ", rollbackOnly=" + tx.isRollbackOnly() + ", nodeId=" + tx.nodeId() + ", timeout=" + tx.timeout() + + ", startTime=" + tx.startTime() + ", duration=" + (U.currentTimeMillis() - tx.startTime()) + (tx instanceof GridNearTxLocal ? ", label=" + tx.label() : "") + ']'; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java new file mode 100644 index 0000000000000..5654e20de1316 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridLocalConfigManager.java @@ -0,0 +1,274 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Deque; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Set; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.pagemem.store.IgnitePageStoreManager; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteUuid; + +import static org.apache.ignite.internal.processors.cache.GridCacheUtils.isPersistentCache; + +/** + * Responsible for restoring local cache configurations (both from static configuration and persistence). + * Keep stop sequence of caches and caches which were presented on node before node join. + */ +public class GridLocalConfigManager { + /** */ + private final boolean startClientCaches = + IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_START_CACHES_ON_JOIN, false); + + /** Caches stop sequence. */ + private final Deque stopSeq = new LinkedList<>(); + + /** Logger. 
*/ + private final IgniteLogger log; + + /** Node's local caches on start (both from static configuration and from persistent caches). */ + private Set localCachesOnStart; + + /** Cache processor. */ + private final GridCacheProcessor cacheProcessor; + + /** Context. */ + private final GridKernalContext ctx; + + /** + * @param cacheProcessor Cache processor. + * @param kernalCtx Kernal context. + */ + public GridLocalConfigManager( + GridCacheProcessor cacheProcessor, + GridKernalContext kernalCtx + ) { + this.cacheProcessor = cacheProcessor; + ctx = kernalCtx; + log = ctx.log(getClass()); + } + + /** + * Save cache configuration to persistent store if necessary. + * + * @param storedCacheData Stored cache data. + * @param overwrite Overwrite existing. + */ + public void saveCacheConfiguration(StoredCacheData storedCacheData, boolean overwrite) throws IgniteCheckedException { + assert storedCacheData != null; + + GridCacheSharedContext sharedContext = cacheProcessor.context(); + + if (sharedContext.pageStore() != null + && !sharedContext.kernalContext().clientNode() + && isPersistentCache(storedCacheData.config(), sharedContext.gridConfig().getDataStorageConfiguration())) + sharedContext.pageStore().storeCacheData(storedCacheData, overwrite); + } + + /** + * + */ + public Collection stopSequence() { + return stopSeq; + } + + /** + * @return Caches to be started when this node starts. + */ + public Set localCachesOnStart() { + return localCachesOnStart; + } + + /** + * @throws IgniteCheckedException If failed. 
+ */ + public CacheJoinNodeDiscoveryData restoreCacheConfigurations() throws IgniteCheckedException { + if (ctx.isDaemon()) + return null; + + Map caches = new HashMap<>(); + + Map templates = new HashMap<>(); + + restoreCaches(caches, templates, ctx.config(), ctx.cache().context().pageStore()); + + CacheJoinNodeDiscoveryData discoData = new CacheJoinNodeDiscoveryData( + IgniteUuid.randomUuid(), + caches, + templates, + startAllCachesOnClientStart() + ); + + localCachesOnStart = new HashSet<>(discoData.caches().keySet()); + + return discoData; + } + + /** + * @return {@code True} if need locally start all existing caches on client node start. + */ + private boolean startAllCachesOnClientStart() { + return startClientCaches && ctx.clientNode(); + } + + /** + * @param caches Caches accumulator. + * @param templates Templates accumulator. + * @param config Ignite configuration. + * @param pageStoreManager Page store manager. + */ + private void restoreCaches( + Map caches, + Map templates, + IgniteConfiguration config, + IgnitePageStoreManager pageStoreManager + ) throws IgniteCheckedException { + assert !config.isDaemon() : "Trying to restore cache configurations on daemon node."; + + CacheConfiguration[] cfgs = config.getCacheConfiguration(); + + for (int i = 0; i < cfgs.length; i++) { + CacheConfiguration cfg = new CacheConfiguration(cfgs[i]); + + // Replace original configuration value. + cfgs[i] = cfg; + + addCacheFromConfiguration(cfg, false, caches, templates); + } + + if (CU.isPersistenceEnabled(config) && pageStoreManager != null) { + Map storedCaches = pageStoreManager.readCacheConfigurations(); + + if (!F.isEmpty(storedCaches)) { + List skippedConfigs = new ArrayList<>(); + + for (StoredCacheData storedCacheData : storedCaches.values()) { + String cacheName = storedCacheData.config().getName(); + + CacheType type = ctx.cache().cacheType(cacheName); + + if (!caches.containsKey(cacheName)) + // No static cache - add the configuration. 
+ addStoredCache(caches, storedCacheData, cacheName, type, true, false); + else { + addStoredCache(caches, storedCacheData, cacheName, type, true, + cacheProcessor.keepStaticCacheConfiguration()); + + if (!cacheProcessor.keepStaticCacheConfiguration() && type == CacheType.USER) + skippedConfigs.add(cacheName); + + } + } + + if (!F.isEmpty(skippedConfigs)) { + U.warn(log, "Static configuration for the following caches will be ignored because a persistent " + + "cache with the same name already exist (see " + + "https://apacheignite.readme.io/docs/cache-configuration for more information): " + + skippedConfigs); + } + } + } + } + + /** + * Add stored cache data to caches storage. + * + * @param caches Cache storage. + * @param cacheData Cache data to add. + * @param cacheName Cache name. + * @param cacheType Cache type. + * @param isStaticallyConfigured Statically configured flag. + */ + private void addStoredCache( + Map caches, + StoredCacheData cacheData, + String cacheName, + CacheType cacheType, + boolean persistedBefore, + boolean isStaticallyConfigured + ) { + if (!caches.containsKey(cacheName)) { + if (!cacheType.userCache()) + stopSeq.addLast(cacheName); + else + stopSeq.addFirst(cacheName); + } + + caches.put(cacheName, new CacheJoinNodeDiscoveryData.CacheInfo(cacheData, cacheType, cacheData.sql(), + persistedBefore ? 1 : 0, isStaticallyConfigured)); + } + + /** + * @param cfg Cache configuration. + * @param sql SQL flag. + * @param caches Caches map. + * @param templates Templates map. + * @throws IgniteCheckedException If failed. + */ + private void addCacheFromConfiguration( + CacheConfiguration cfg, + boolean sql, + Map caches, + Map templates + ) throws IgniteCheckedException { + String cacheName = cfg.getName(); + + CU.validateCacheName(cacheName); + + cacheProcessor.cloneCheckSerializable(cfg); + + CacheObjectContext cacheObjCtx = ctx.cacheObjects().contextForCache(cfg); + + // Initialize defaults. 
+ cacheProcessor.initialize(cfg, cacheObjCtx); + + StoredCacheData cacheData = new StoredCacheData(cfg); + + cacheData.sql(sql); + + if (GridCacheUtils.isCacheTemplateName(cacheName)) + templates.put(cacheName, new CacheJoinNodeDiscoveryData.CacheInfo(cacheData, CacheType.USER, false, 0, true)); + else { + if (caches.containsKey(cacheName)) { + throw new IgniteCheckedException("Duplicate cache name found (check configuration and " + + "assign unique name to each cache): " + cacheName); + } + + CacheType cacheType = ctx.cache().cacheType(cacheName); + + if (cacheType != CacheType.USER && cfg.getDataRegionName() == null) + cfg.setDataRegionName(cacheProcessor.context().database().systemDateRegionName()); + + addStoredCache(caches, cacheData, cacheName, cacheType, false, true); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java new file mode 100644 index 0000000000000..95c6ea0b1ed72 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/LongRunningTxTimeDumpSettingsClosure.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import org.apache.ignite.Ignite; +import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; +import org.apache.ignite.lang.IgniteRunnable; +import org.apache.ignite.resources.IgniteInstanceResource; + +/** + * Closure that is sent on all server nodes in order to change configuration parameters + * of dumping long running transactions' system and user time values. + */ +public class LongRunningTxTimeDumpSettingsClosure implements IgniteRunnable { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final Long timeoutThreshold; + + /** */ + private final Double samplesCoefficient; + + /** */ + private final Integer samplesPerSecondLimit; + + /** + * Auto-inject Ignite instance + */ + @IgniteInstanceResource + private Ignite ignite; + + /** */ + public LongRunningTxTimeDumpSettingsClosure( + Long timeoutThreshold, + Double samplesCoefficient, + Integer samplesPerSecondLimit + ) { + this.timeoutThreshold = timeoutThreshold; + this.samplesCoefficient = samplesCoefficient; + this.samplesPerSecondLimit = samplesPerSecondLimit; + } + + /** {@inheritDoc} */ + @Override + public void run() { + IgniteTxManager tm = ((IgniteEx) ignite).context().cache().context().tm(); + + if (timeoutThreshold != null) + tm.longTransactionTimeDumpThreshold(timeoutThreshold); + + if (samplesCoefficient != null) + tm.transactionTimeDumpSamplesCoefficient(samplesCoefficient); + + if (samplesPerSecondLimit != null) + tm.transactionTimeDumpSamplesPerSecondLimit(samplesPerSecondLimit); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java index 
fd58b29cf477f..14eb004f38969 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/PartitionTxUpdateCounterImpl.java @@ -74,6 +74,9 @@ public class PartitionTxUpdateCounterImpl implements PartitionUpdateCounter { /** HWM. */ protected final AtomicLong reserveCntr = new AtomicLong(); + /** */ + private boolean first = true; + /** * Initial counter points to last sequential update after WAL recovery. * @deprecated TODO FIXME https://issues.apache.org/jira/browse/IGNITE-11794 @@ -84,7 +87,7 @@ public class PartitionTxUpdateCounterImpl implements PartitionUpdateCounter { @Override public void init(long initUpdCntr, @Nullable byte[] cntrUpdData) { cntr.set(initUpdCntr); - initCntr = initUpdCntr; + reserveCntr.set(initCntr = initUpdCntr); queue = fromBytes(cntrUpdData); } @@ -120,11 +123,14 @@ protected synchronized long highestAppliedCounter() { // Reserved update counter is updated only on exchange. long cur = get(); - // Special case: single node in topology. - if (val == 0) - reserveCntr.set(cur); + // Always set reserved counter equal to max known counter. + long max = Math.max(val, cur); - if (val < cur) // Outdated counter (txs are possible before current topology future is finished). + if (reserveCntr.get() < max) + reserveCntr.set(max); + + // Outdated counter (txs are possible before current topology future is finished if primary is not changed). + if (val < cur) return; // Absolute counter should be not less than last applied update. @@ -133,15 +139,17 @@ protected synchronized long highestAppliedCounter() { if (val < highestAppliedCounter()) throw new IgniteCheckedException("Failed to update the counter [newVal=" + val + ", curState=" + this + ']'); - if (reserveCntr.get() < val) - reserveCntr.set(val); // Adjust counter on new primary. 
- cntr.set(val); - // If some holes are present at this point, that means some update were missed on recovery and will be restored - // during rebalance. All gaps are safe to "forget". - if (!queue.isEmpty()) - queue.clear(); + /** If some holes are present at this point, that means some updates were missed on recovery and will be restored + * during rebalance. All gaps are safe to "forget". + * Should only do it for first PME (later missed updates on node left are reset in {@link #finalizeUpdateCounters}). */ + if (first) { + if (!queue.isEmpty()) + queue.clear(); + + first = false; + } } /** {@inheritDoc} */ @@ -220,7 +228,10 @@ else if (last.within(start) && last.within(start + delta - 1)) @Override public void updateInitial(long start, long delta) { update(start, delta); - reserveCntr.set(initCntr = get()); + initCntr = get(); + + if (reserveCntr.get() < initCntr) + reserveCntr.set(initCntr); } /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java index 14e47b1bc6156..f73fec11a4b25 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/datastructures/CacheDataStructuresManager.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashSet; -import java.util.Iterator; import java.util.Map; import java.util.UUID; import java.util.concurrent.Callable; @@ -33,6 +32,7 @@ import java.util.concurrent.ConcurrentMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; +import javax.cache.Cache; import javax.cache.event.CacheEntryEvent; import javax.cache.event.CacheEntryUpdatedListener; import org.apache.ignite.Ignite; @@ -40,6 +40,7 @@ 
import org.apache.ignite.IgniteSet; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheEntryEventSerializableFilter; +import org.apache.ignite.cache.CachePeekMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.IgniteKernal; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; @@ -49,7 +50,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheGateway; import org.apache.ignite.internal.processors.cache.GridCacheManagerAdapter; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; -import org.apache.ignite.internal.processors.cache.KeyCacheObject; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; import org.apache.ignite.internal.processors.datastructures.GridAtomicCacheQueueImpl; import org.apache.ignite.internal.processors.datastructures.GridCacheQueueHeader; @@ -62,7 +62,6 @@ import org.apache.ignite.internal.processors.datastructures.GridTransactionalCacheQueueImpl; import org.apache.ignite.internal.processors.datastructures.SetItemKey; import org.apache.ignite.internal.processors.task.GridInternal; -import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; @@ -71,7 +70,6 @@ import org.apache.ignite.resources.IgniteInstanceResource; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import java.util.concurrent.ConcurrentHashMap; import static javax.cache.event.EventType.REMOVED; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -107,10 +105,6 @@ public class CacheDataStructuresManager extends GridCacheManagerAdapter { /** Sets map. */ private final ConcurrentMap setsMap; - /** Set keys used for set iteration. 
*/ - private ConcurrentMap> setDataMap = - new ConcurrentHashMap<>(); - /** Queues map. */ private final ConcurrentMap queuesMap; @@ -347,45 +341,6 @@ private void waitInitialization() throws IgniteCheckedException { } } - /** - * Entry update callback. - * - * @param key Key. - * @param rmv {@code True} if entry was removed. - * @param keepBinary Keep binary flag. - */ - public void onEntryUpdated(KeyCacheObject key, boolean rmv, boolean keepBinary) { - // No need to notify data structures manager for a user cache since all DS objects are stored - // in system caches. - if (cctx.userCache()) - return; - - Object key0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(key, keepBinary, false); - - if (key0 instanceof SetItemKey) - onSetItemUpdated((SetItemKey)key0, rmv); - } - - /** - * Partition evicted callback. - * - * @param part Partition number. - */ - public void onPartitionEvicted(int part) { - GridCacheAffinityManager aff = cctx.affinity(); - - for (GridConcurrentHashSet set : setDataMap.values()) { - Iterator iter = set.iterator(); - - while (iter.hasNext()) { - SetItemKey key = iter.next(); - - if (aff.partition(key) == part) - iter.remove(); - } - } - } - /** * @param name Set name. * @param colloc Collocated flag. @@ -466,14 +421,6 @@ public boolean knownType(Object obj) { return obj == null || KNOWN_CLS.contains(obj.getClass()); } - /** - * @param id Set ID. - * @return Data for given set. - */ - @Nullable public GridConcurrentHashSet setData(IgniteUuid id) { - return setDataMap.get(id); - } - /** * @param setId Set ID. * @param topVer Topology version. 
@@ -491,22 +438,19 @@ private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) thr cctx.preloader().syncFuture().get(); } - GridConcurrentHashSet set = setDataMap.get(setId); - - if (set == null) - return; - - IgniteInternalCache cache = cctx.cache(); + IgniteInternalCache cache = cctx.cache(); final int BATCH_SIZE = 100; Collection keys = new ArrayList<>(BATCH_SIZE); - for (SetItemKey key : set) { - if (!loc && !aff.primaryByKey(cctx.localNode(), key, topVer)) + for (Cache.Entry entry : cache.localEntries(new CachePeekMode[] {CachePeekMode.PRIMARY})) { + Object obj = entry.getKey(); + + if (!(obj instanceof SetItemKey && setId.equals(((SetItemKey)obj).setId()))) continue; - keys.add(key); + keys.add((SetItemKey)obj); if (keys.size() == BATCH_SIZE) { retryRemoveAll(cache, keys); @@ -517,8 +461,6 @@ private void removeSetData(IgniteUuid setId, AffinityTopologyVersion topVer) thr if (!keys.isEmpty()) retryRemoveAll(cache, keys); - - setDataMap.remove(setId); } /** @@ -608,30 +550,6 @@ private boolean pingNodes(Collection nodes) throws IgniteCheckedExc return true; } - /** - * @param key Set item key. - * @param rmv {@code True} if item was removed. - */ - private void onSetItemUpdated(SetItemKey key, boolean rmv) { - GridConcurrentHashSet set = setDataMap.get(key.setId()); - - if (set == null) { - if (rmv) - return; - - GridConcurrentHashSet old = setDataMap.putIfAbsent(key.setId(), - set = new GridConcurrentHashSet<>()); - - if (old != null) - set = old; - } - - if (rmv) - set.remove(key); - else - set.add(key); - } - /** * @param setId Set ID. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java index d4dc59d05221a..aa271400b1f0e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedCacheEntry.java @@ -642,7 +642,7 @@ public void doneRemote( /** * Rechecks if lock should be reassigned. */ - public void recheck() { + public CacheLockCandidates recheck(GridCacheMvccCandidate checkingCandidate) { CacheLockCandidates prev = null; CacheLockCandidates owner = null; @@ -675,7 +675,9 @@ public void recheck() { } // This call must be made outside of synchronization. - checkOwnerChanged(prev, owner, val); + checkOwnerChanged(prev, owner, val, checkingCandidate); + + return owner; } /** {@inheritDoc} */ @@ -748,15 +750,22 @@ protected void checkCallbacks(boolean emptyBefore, boolean emptyAfter) { // Allow next lock in the thread to proceed. 
if (!cand.used()) { + if (cand.owner()) + break; + GridCacheContext cctx0 = cand.parent().context(); GridDistributedCacheEntry e = (GridDistributedCacheEntry)cctx0.cache().peekEx(cand.parent().key()); - if (e != null) - e.recheck(); - - break; + if (e != null) { + CacheLockCandidates newOnwer = e.recheck(owner); + if(newOnwer == null || !newOnwer.hasCandidate(cand.version())) + // the lock from the chain hasn't been acquired, no sense to check the rest of the chain + break; + } + else + break; } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java index c4f54e5af7f93..20ade3ad87950 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/GridDistributedTxRemoteAdapter.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -72,6 +73,7 @@ import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.transactions.TransactionConcurrency; @@ -85,6 +87,8 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.READ; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.RELOAD; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static 
org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.EVICTED; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_BACKUP; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.transactions.TransactionState.COMMITTED; @@ -496,6 +500,9 @@ private void commitIfLocked() throws IgniteCheckedException { cctx.database().checkpointReadLock(); + // Reserved partitions (necessary to prevent race due to updates in RENTING state). + Set reservedParts = new HashSet<>(); + try { Collection entries = near() || cctx.snapshot().needTxReadLogging() ? allEntries() : writeEntries(); @@ -509,7 +516,37 @@ private void commitIfLocked() throws IgniteCheckedException { for (IgniteTxEntry txEntry : entries) { GridCacheContext cacheCtx = txEntry.context(); - boolean replicate = cacheCtx.isDrEnabled(); + // Prevent stale updates. + GridDhtLocalPartition locPart = + cacheCtx.group().topology().localPartition(txEntry.cached().partition()); + + boolean reserved = false; + + if (!near() && locPart != null && !reservedParts.contains(locPart) && + (!(reserved = locPart.reserve()) || locPart.state() == RENTING)) { + LT.warn(log(), "Skipping update to partition that is concurrently evicting " + + "[grp=" + cacheCtx.group().cacheOrGroupName() + ", part=" + locPart + "]"); + + // Reserved RENTING partition. 
+ if (reserved) { + assert locPart.state() != EVICTED && locPart.reservations() > 0; + + reservedParts.add(locPart); + } + + continue; + } + + if (reserved) { + assert locPart.state() != EVICTED && locPart.reservations() > 0; + + reservedParts.add(locPart); + } + + assert near() || locPart == null || + !(locPart.state() == RENTING || locPart.state() == EVICTED) : locPart; + + boolean replicate = cacheCtx.isDrEnabled(); while (true) { try { @@ -807,6 +844,9 @@ else if (!near()){ } } finally { + for (GridDhtLocalPartition locPart : reservedParts) + locPart.release(); + cctx.database().checkpointReadUnlock(); if (wrapper != null) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java index 4c00300e590b8..6cdc5de57c925 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtCacheAdapter.java @@ -1575,8 +1575,7 @@ private void updateTtl(GridCacheAdapter cache, * @param curVer Current topology version. * @return {@code True} if cache affinity changed and operation should be remapped. */ - protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopologyVersion curVer, - Collection keys) { + protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopologyVersion curVer) { if (curVer.equals(expVer)) return false; @@ -1585,24 +1584,21 @@ protected final boolean needRemap(AffinityTopologyVersion expVer, AffinityTopolo if (curVer.compareTo(lastAffChangedTopVer) >= 0 && curVer.compareTo(expVer) <= 0) return false; - // TODO IGNITE-7164 check mvcc crd for mvcc enabled txs. 
+ Collection cacheNodes0 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), expVer); + Collection cacheNodes1 = ctx.discovery().cacheGroupAffinityNodes(ctx.groupId(), curVer); - for (KeyCacheObject key : keys) { - assert key.partition() != -1; + if (!cacheNodes0.equals(cacheNodes1) || ctx.affinity().affinityTopologyVersion().compareTo(curVer) < 0) + return true; - try { - List aff1 = ctx.affinity().assignments(expVer).get(key.partition()); - List aff2 = ctx.affinity().assignments(curVer).get(key.partition()); + try { + List> aff1 = ctx.affinity().assignments(expVer); + List> aff2 = ctx.affinity().assignments(curVer); - if (!aff1.containsAll(aff2) || aff2.isEmpty() || !aff1.get(0).equals(aff2.get(0))) - return true; - } - catch (IllegalStateException ignored) { - return true; - } + return !aff1.equals(aff2); + } + catch (IllegalStateException ignored) { + return true; } - - return false; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java index 35f2ceafef80a..9c700fc17a83e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTransactionalCacheAdapter.java @@ -938,7 +938,7 @@ public IgniteInternalFuture lockAllAsync( } try { - if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion(), req.keys())) { + if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) { if (log.isDebugEnabled()) { log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + @@ -1043,7 +1043,7 @@ public IgniteInternalFuture lockAllAsync( } try { - if (top != null && needRemap(req.topologyVersion(), 
top.readyTopologyVersion(), req.keys())) { + if (top != null && needRemap(req.topologyVersion(), top.readyTopologyVersion())) { if (log.isDebugEnabled()) { log.debug("Client topology version mismatch, need remap lock request [" + "reqTopVer=" + req.topologyVersion() + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java index 38c532b93a3b3..75df45e9f2feb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/GridDhtTxPrepareFuture.java @@ -73,6 +73,7 @@ import org.apache.ignite.internal.processors.dr.GridDrType; import org.apache.ignite.internal.processors.timeout.GridTimeoutObjectAdapter; import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException; +import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.GridLeanSet; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -694,7 +695,12 @@ private boolean mapIfLocked() { } if (forceKeysFut == null || (forceKeysFut.isDone() && forceKeysFut.error() == null)) - prepare0(); + try { + prepare0(); + } + catch (IgniteTxRollbackCheckedException e) { + onError(e); + } else { forceKeysFut.listen(new CI1>() { @Override public void apply(IgniteInternalFuture f) { @@ -1218,7 +1224,7 @@ private IgniteTxOptimisticCheckedException versionCheckError(IgniteTxEntry entry /** * */ - private void prepare0() { + private void prepare0() throws IgniteTxRollbackCheckedException { boolean error = false; try { @@ -1391,8 +1397,12 @@ private void sendPrepareRequests() { break; } - catch (GridCacheEntryRemovedException ignore) { - 
assert false : "Got removed exception on entry with dht local candidate: " + entry; + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. + onError(tx.rollbackException()); } idx++; @@ -1413,8 +1423,12 @@ private void sendPrepareRequests() { break; } - catch (GridCacheEntryRemovedException ignore) { - assert false : "Got removed exception on entry with dht local candidate: " + entry; + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. + onError(tx.rollbackException()); } } } @@ -1488,8 +1502,13 @@ private void sendPrepareRequests() { } break; - } catch (GridCacheEntryRemovedException ignore) { - assert false : "Got removed exception on entry with dht local candidate: " + entry; + } + catch (GridCacheEntryRemovedException e) { + log.error("Got removed exception on entry with dht local candidate. Transaction will be " + + "rolled back. Entry: " + entry + " tx: " + CU.txDump(tx), e); + + // Entry was unlocked by concurrent rollback. 
+ onError(tx.rollbackException()); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java index 7ce0a879b4b0f..63ae825cea763 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastRequest.java @@ -20,12 +20,13 @@ import java.nio.ByteBuffer; import java.util.Collection; import org.apache.ignite.internal.GridDirectCollection; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheIdMessage; -import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCountersMessage; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; import org.apache.ignite.plugin.extensions.communication.MessageReader; import org.apache.ignite.plugin.extensions.communication.MessageWriter; +import org.jetbrains.annotations.NotNull; /** */ public class PartitionCountersNeighborcastRequest extends GridCacheIdMessage { @@ -39,15 +40,27 @@ public class PartitionCountersNeighborcastRequest extends GridCacheIdMessage { /** */ private IgniteUuid futId; + /** Topology version. 
*/ + private AffinityTopologyVersion topVer; + /** */ public PartitionCountersNeighborcastRequest() { } /** */ public PartitionCountersNeighborcastRequest( - Collection updCntrs, IgniteUuid futId) { + Collection updCntrs, + IgniteUuid futId, + @NotNull AffinityTopologyVersion topVer + ) { this.updCntrs = updCntrs; this.futId = futId; + this.topVer = topVer; + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; } /** @@ -86,6 +99,12 @@ public IgniteUuid futId() { writer.incrementState(); case 5: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + + case 6: if (!writer.writeCollection("updCntrs", updCntrs, MessageCollectionItemType.MSG)) return false; @@ -116,6 +135,14 @@ public IgniteUuid futId() { reader.incrementState(); case 5: + topVer = reader.readAffinityTopologyVersion("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 6: updCntrs = reader.readCollection("updCntrs", MessageCollectionItemType.MSG); if (!reader.isLastRead()) @@ -135,7 +162,7 @@ public IgniteUuid futId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 6; + return 7; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java index 093c40925e212..e21472ecee481 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/PartitionCountersNeighborcastResponse.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.distributed.dht; import java.nio.ByteBuffer; +import 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheIdMessage; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.MessageReader; @@ -31,13 +32,25 @@ public class PartitionCountersNeighborcastResponse extends GridCacheIdMessage { /** */ private IgniteUuid futId; + /** Topology version. */ + private AffinityTopologyVersion topVer; + /** */ public PartitionCountersNeighborcastResponse() { } /** */ - public PartitionCountersNeighborcastResponse(IgniteUuid futId) { + public PartitionCountersNeighborcastResponse( + IgniteUuid futId, + AffinityTopologyVersion topVer + ) { this.futId = futId; + this.topVer = topVer; + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; } /** @@ -68,6 +81,12 @@ public IgniteUuid futId() { writer.incrementState(); + case 5: + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) + return false; + + writer.incrementState(); + } return true; @@ -92,6 +111,14 @@ public IgniteUuid futId() { reader.incrementState(); + case 5: + topVer = reader.readAffinityTopologyVersion("topVer"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + } return reader.afterMessageRead(PartitionCountersNeighborcastResponse.class); @@ -104,7 +131,7 @@ public IgniteUuid futId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 5; + return 6; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java index 5cdb2009207d9..6f5898b157819 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/atomic/GridDhtAtomicCache.java @@ -1745,7 +1745,7 @@ private void updateAllAsyncInternal0( // Can not wait for topology future since it will break // GridNearAtomicCheckUpdateRequest processing. remap = !top.topologyVersionFuture().exchangeDone() || - needRemap(req.topologyVersion(), top.readyTopologyVersion(), req.keys()); + needRemap(req.topologyVersion(), top.readyTopologyVersion()); } if (!remap) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java index d0a63211489ea..e7cbdf770a5c6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionDemander.java @@ -27,6 +27,7 @@ import java.util.Optional; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -52,6 +53,7 @@ import org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException; import org.apache.ignite.internal.processors.cache.GridCachePartitionExchangeManager; import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.RebalanceStatisticsUtils.RebalanceFutureStatistics; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import 
org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; @@ -73,10 +75,17 @@ import org.apache.ignite.spi.IgniteSpiException; import org.jetbrains.annotations.Nullable; +import static java.lang.System.currentTimeMillis; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static java.util.Objects.nonNull; +import static java.util.stream.Collectors.toList; +import static java.util.stream.Collectors.toMap; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_OBJECT_LOADED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_LOADED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STOPPED; +import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.RebalanceStatisticsUtils.rebalanceStatistics; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_PRELOAD; @@ -115,6 +124,10 @@ public class GridDhtPartitionDemander { /** Cached rebalance topics. */ private final Map rebalanceTopics; + /** Futures involved in the last rebalance. For statistics. */ + @GridToStringExclude + private final Collection lastStatFutures = new ConcurrentLinkedQueue<>(); + /** * @param grp Ccahe group. */ @@ -361,17 +374,18 @@ Runnable addAssignments( } return () -> { - if (next != null) - fut.listen(f -> { - try { - if (f.get()) // Not cancelled. - next.run(); // Starts next cache rebalancing (according to the order). 
- } - catch (IgniteCheckedException e) { - if (log.isDebugEnabled()) - log.debug(e.getMessage()); - } - }); + fut.listen(f -> { + try { + printRebalanceStatistics(); + + if (f.get() && nonNull(next)) + next.run(); + } + catch (IgniteCheckedException e) { + if (log.isDebugEnabled()) + log.debug(e.getMessage()); + } + }); requestPartitions(fut, assignments); }; @@ -517,6 +531,8 @@ private void requestPartitions(final RebalanceFuture fut, GridDhtPreloaderAssign return; try { + fut.stat.addMessageStatistics(topicId, node); + ctx.io().sendOrderedMessage(node, rebalanceTopics.get(topicId), demandMsg.convertIfNeeded(node.version()), grp.ioPolicy(), demandMsg.timeout()); @@ -673,9 +689,9 @@ public void handleSupplyMessage( final RebalanceFuture fut = rebalanceFut; - try { - fut.cancelLock.readLock().lock(); + fut.cancelLock.readLock().lock(); + try { ClusterNode node = ctx.node(nodeId); if (node == null) { @@ -743,6 +759,8 @@ public void handleSupplyMessage( } try { + fut.stat.addReceivePartitionStatistics(topicId, ctx.node(nodeId), supplyMsg); + AffinityAssignment aff = grp.affinity().cachedAffinity(topVer); // Preload. @@ -1037,6 +1055,10 @@ public static class RebalanceFuture extends GridFutureAdapter { * to partition. */ private final ReentrantReadWriteLock cancelLock; + /** Rebalance statistics */ + @GridToStringExclude + final RebalanceFutureStatistics stat = new RebalanceFutureStatistics(); + /** * @param grp Cache group. * @param assignments Assignments. @@ -1082,7 +1104,7 @@ public static class RebalanceFuture extends GridFutureAdapter { this.log = null; this.rebalanceId = -1; this.routines = 0; - this.cancelLock = null; + this.cancelLock = new ReentrantReadWriteLock(); } /** @@ -1103,7 +1125,7 @@ private boolean isActual(long rebalanceId) { /** * @return Is initial (created at demander creation). */ - private boolean isInitial() { + public boolean isInitial() { return topVer == null; } @@ -1113,11 +1135,11 @@ private boolean isInitial() { * @return {@code True}. 
*/ @Override public boolean cancel() { - try { - // Cancel lock is needed only for case when some message might be on the fly while rebalancing is - // cancelled. - cancelLock.writeLock().lock(); + // Cancel lock is needed only for case when some message might be on the fly while rebalancing is + // cancelled. + cancelLock.writeLock().lock(); + try { synchronized (this) { if (isDone()) return true; @@ -1349,4 +1371,74 @@ public String toString() { return S.toString(RebalanceFuture.class, this); } } + + /** + * Collect demander per cache groups. For print statistics. + * + * @return List demanders. + * */ + private List demanders(){ + return ctx.cacheContexts().stream() + .map(GridCacheContext::preloader) + .filter(GridDhtPreloader.class::isInstance) + .map(GridDhtPreloader.class::cast) + .map(GridDhtPreloader::demander) + .collect(toList()); + } + + /** + * Print rebalance statistics into log. + * Statistic will print if + * {@link RebalanceStatisticsUtils#printRebalanceStatistics() + * printRebalanceStatistics()} == true. + * To use correctly you need to call this method exactly once right after + * {@code RebalanceFuture} was completed (successfully or not). + *

+ * If {@link #rebalanceFut} was done successfully, prints statistics + * for cache group. + *

+ * If the whole rebalance is over, print statistics for all cache groups. + * The end of the rebalance is determined by the successful done all + * {@code RebalanceFuture}'s. + * + * @throws IgniteCheckedException when get result {@code RebalanceFuture} + * @see RebalanceFuture RebalanceFuture + */ + private void printRebalanceStatistics() throws IgniteCheckedException { + if (!RebalanceStatisticsUtils.printRebalanceStatistics()) + return; + + RebalanceFuture currRebFut = rebalanceFut; + assert currRebFut.isDone() : "RebalanceFuture should be done."; + + currRebFut.stat.endTime(currentTimeMillis()); + lastStatFutures.add(currRebFut); + + if (currRebFut.get()) //Success rebalance for current cache group + log.info(rebalanceStatistics(false, singletonMap(grp, singletonList(currRebFut)))); + else + return; + + for (GridCacheContext gridCacheContext : ctx.cacheContexts()) { + IgniteInternalFuture rebalanceFuture = gridCacheContext.preloader().rebalanceFuture(); + + if (!rebalanceFuture.isDone() || !rebalanceFuture.get()) //Rebalance not done or not success + return; + } + + List demanders = demanders(); + + Map> rebFutrs = + demanders.stream().collect(toMap(demander -> demander.grp, demander -> demander.lastStatFutures)); + + try { + log.info(rebalanceStatistics(true, rebFutrs)); + } + finally { + demanders.forEach(demander -> { + demander.rebalanceFut.stat.clear(); + demander.lastStatFutures.clear(); + }); + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java index 5f3188fc3145a..0cdf265c724b7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPartitionsExchangeFuture.java @@ -42,6 +42,7 @@ import java.util.concurrent.locks.ReadWriteLock; import java.util.stream.Collectors; import java.util.stream.Stream; +import javax.cache.expiry.EternalExpiryPolicy; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; @@ -143,6 +144,9 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** */ public static final String EXCHANGE_LOG = "org.apache.ignite.internal.exchange.time"; + /** Partition state failed message. */ + public static final String PARTITION_STATE_FAILED_MSG = "Partition states validation has failed for group: %s, msg: %s"; + /** */ private static final int RELEASE_FUTURE_DUMP_THRESHOLD = IgniteSystemProperties.getInteger(IGNITE_PARTITION_RELEASE_FUTURE_DUMP_THRESHOLD, 0); @@ -198,6 +202,9 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** */ private AtomicBoolean added = new AtomicBoolean(false); + /** Exchange type. */ + private volatile ExchangeType exchangeType; + /** * Discovery event receive latch. There is a race between discovery event processing and single message * processing, so it is possible to create an exchange future before the actual discovery event is received. @@ -348,8 +355,8 @@ public class GridDhtPartitionsExchangeFuture extends GridDhtTopologyFutureAdapte /** Discovery lag / Clocks discrepancy, calculated on coordinator when all single messages are received. */ private T2 discoveryLag; - /** Partitions scheduled for historical reblanace for this topology version. */ - private Map> histPartitions; + /** Partitions scheduled for clearing before rebalance for this topology version. */ + private Map> clearingPartitions; /** * @param cctx Cache context. 
@@ -478,6 +485,13 @@ public void affinityChangeMessage(CacheAffinityChangeMessage affChangeMsg) { return isDone() ? result() : exchCtx.events().topologyVersion(); } + /** + * @return Exchange type or null if not determined yet. + */ + public ExchangeType exchangeType() { + return exchangeType; + } + /** * Retreives the node which has WAL history since {@code cntrSince}. * @@ -843,6 +857,8 @@ else if (msg instanceof WalStateAbstractMessage) cctx.cache().registrateProxyRestart(resolveCacheRequests(exchActions), afterLsnrCompleteFut); + exchangeType = exchange; + for (PartitionsExchangeAware comp : cctx.exchange().exchangeAwareComponents()) comp.onInitBeforeTopologyLock(this); @@ -1420,7 +1436,7 @@ private void distributedExchange() throws IgniteCheckedException { cctx.exchange().exchangerBlockingSectionEnd(); } - histPartitions = new HashMap(); + clearingPartitions = new HashMap(); timeBag.finishGlobalStage("WAL history reservation"); @@ -2161,7 +2177,7 @@ private String exchangeTimingsLogMessage(String header, List timings) { if (drCacheCtx.isDrEnabled()) { try { - drCacheCtx.dr().onExchange(res, exchId.isLeft(), activateCluster()); + drCacheCtx.dr().onExchange(res, exchId.isLeft()); } catch (IgniteCheckedException e) { U.error(log, "Failed to notify DR: " + e, e); @@ -3458,12 +3474,13 @@ private void finishExchangeOnCoordinator(@Nullable Collection sndRe if (discoveryCustomMessage instanceof DynamicCacheChangeBatch) { if (exchActions != null) { - assignPartitionsStates(); Set caches = exchActions.cachesToResetLostPartitions(); if (!F.isEmpty(caches)) resetLostPartitions(caches); + + assignPartitionsStates(); } } else if (discoveryCustomMessage instanceof SnapshotDiscoveryMessage @@ -3686,13 +3703,16 @@ private void validatePartitionsState() { // Do not validate read or write through caches or caches with disabled rebalance // or ExpiryPolicy is set or validation is disabled. 
+ boolean eternalExpiryPolicy = grpCtx != null && (grpCtx.config().getExpiryPolicyFactory() == null + || grpCtx.config().getExpiryPolicyFactory().create() instanceof EternalExpiryPolicy); + if (grpCtx == null || grpCtx.config().isReadThrough() || grpCtx.config().isWriteThrough() || grpCtx.config().getCacheStoreFactory() != null || grpCtx.config().getRebalanceDelay() == -1 || grpCtx.config().getRebalanceMode() == CacheRebalanceMode.NONE - || grpCtx.config().getExpiryPolicyFactory() == null + || !eternalExpiryPolicy || SKIP_PARTITION_SIZE_VALIDATION) return null; @@ -3700,7 +3720,7 @@ private void validatePartitionsState() { validator.validatePartitionCountersAndSizes(GridDhtPartitionsExchangeFuture.this, top, msgs); } catch (IgniteCheckedException ex) { - log.warning("Partition states validation has failed for group: " + grpCtx.cacheOrGroupName() + ". " + ex.getMessage()); + log.warning(String.format(PARTITION_STATE_FAILED_MSG, grpCtx.cacheOrGroupName(), ex.getMessage())); // TODO: Handle such errors https://issues.apache.org/jira/browse/IGNITE-7833 } @@ -5000,34 +5020,35 @@ public static long nextDumpTimeout(int step, long timeout) { * * @param grp Group. * @param part Partition. - * @return {@code True} if partition is historical. + * @return {@code True} if partition has to be cleared before rebalance. */ - public boolean isHistoryPartition(CacheGroupContext grp, int part) { + public boolean isClearingPartition(CacheGroupContext grp, int part) { if (!grp.persistenceEnabled()) return false; synchronized (mux) { - if (histPartitions == null) + if (clearingPartitions == null) return false; - Set parts = histPartitions.get(grp.groupId()); + Set parts = clearingPartitions.get(grp.groupId()); return parts != null && parts.contains(part); } } /** - * Marks a partition for historical rebalance. + * Marks a partition for clearing before rebalance. + * Fully cleared partitions should never be historically rebalanced. * * @param grp Group. * @param part Partition. 
*/ - public void addHistoryPartition(CacheGroupContext grp, int part) { + public void addClearingPartition(CacheGroupContext grp, int part) { if (!grp.persistenceEnabled()) return; synchronized (mux) { - histPartitions.computeIfAbsent(grp.groupId(), k -> new HashSet()).add(part); + clearingPartitions.computeIfAbsent(grp.groupId(), k -> new HashSet()).add(part); } } @@ -5067,7 +5088,7 @@ public void cleanUp() { /** * */ - enum ExchangeType { + public enum ExchangeType { /** */ CLIENT, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java index b1297aadd72ed..adbfc5fdf1a6c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/preloader/GridDhtPreloader.java @@ -281,7 +281,7 @@ private IgniteCheckedException stopError() { histSupplier = ctx.discovery().node(nodeId); } - if (histSupplier != null && exchFut.isHistoryPartition(grp, p)) { + if (histSupplier != null && !exchFut.isClearingPartition(grp, p)) { assert grp.persistenceEnabled(); assert remoteOwners(p, topVer).contains(histSupplier) : remoteOwners(p, topVer); @@ -299,6 +299,11 @@ private IgniteCheckedException stopError() { msg.partitions().addHistorical(p, part.initialUpdateCounter(), countersMap.updateCounter(p), partitions); } else { + // If for some reason (for example if supplier fails and new supplier is elected) partition is + // assigned for full rebalance force clearing if not yet set. 
+ if (grp.persistenceEnabled() && exchFut != null && !exchFut.isClearingPartition(grp, p)) + part.clearAsync(); + List picked = remoteOwners(p, topVer); if (picked.isEmpty()) { @@ -620,4 +625,13 @@ private GridDhtFuture request0(GridCacheContext cctx, Collection> msgStats = new ConcurrentHashMap<>(); + + /** Is needed or not to print rebalance statistics. */ + private final boolean printRebalanceStatistics = printRebalanceStatistics(); + + /** + * Add new message statistics. + * Requires to be invoked before demand message sending. + * This method required for {@code addReceivePartitionStatistics}. + * This method add new message statistics if + * {@link #printRebalanceStatistics} == true. + * + * @param topicId Topic id, require not null. + * @param supplierNode Supplier node, require not null. + * @see RebalanceMessageStatistics + * @see #addReceivePartitionStatistics(Integer, ClusterNode, GridDhtPartitionSupplyMessage) + */ + public void addMessageStatistics(final Integer topicId, final ClusterNode supplierNode) { + assert nonNull(topicId); + assert nonNull(supplierNode); + + if (!printRebalanceStatistics) + return; + + msgStats.computeIfAbsent(topicId, integer -> new ConcurrentHashMap<>()) + .put(supplierNode, new RebalanceMessageStatistics(currentTimeMillis())); + } + + /** + * Add new statistics by receive message with partitions from supplier + * node. Require invoke {@code addMessageStatistics} before send + * demand message. This method add new message statistics if + * {@link #printRebalanceStatistics} == true. + * + * @param topicId Topic id, require not null. + * @param supplierNode Supplier node, require not null. + * @param supplyMsg Supply message, require not null. 
+ * @see ReceivePartitionStatistics + * @see #addMessageStatistics(Integer, ClusterNode) + */ + public void addReceivePartitionStatistics( + final Integer topicId, + final ClusterNode supplierNode, + final GridDhtPartitionSupplyMessage supplyMsg + ) { + assert nonNull(topicId); + assert nonNull(supplierNode); + assert nonNull(supplyMsg); + + if (!printRebalanceStatistics) + return; + + List partStats = supplyMsg.infos().entrySet().stream() + .map(entry -> new PartitionStatistics(entry.getKey(), entry.getValue().infos().size())) + .collect(toList()); + + msgStats.get(topicId).get(supplierNode).receivePartStats + .add(new ReceivePartitionStatistics(currentTimeMillis(), supplyMsg.messageSize(), partStats)); + } + + /** + * Clear statistics. + */ + public void clear() { + msgStats.clear(); + } + + /** + * Set end rebalance time in mills. + * + * @param endTime End rebalance time in mills. + */ + public void endTime(final long endTime) { + this.endTime = endTime; + } + } + + /** Rebalance messages statistics. */ + static class RebalanceMessageStatistics { + /** Time send demand message in mills. */ + private final long sndMsgTime; + + /** Statistics by received partitions. */ + private final Collection receivePartStats = new ConcurrentLinkedQueue<>(); + + /** + * Constructor. + * + * @param sndMsgTime time send demand message. + */ + public RebalanceMessageStatistics(final long sndMsgTime) { + this.sndMsgTime = sndMsgTime; + } + } + + /** Receive partition statistics. */ + static class ReceivePartitionStatistics { + /** Time receive message(on demand message) with partition in mills. */ + private final long rcvMsgTime; + + /** Size receive message in bytes. */ + private final long msgSize; + + /** Received partitions. */ + private final List parts; + + /** + * Constructor. + * + * @param rcvMsgTime time receive message in mills. + * @param msgSize message size in bytes. + * @param parts received partitions, require not null. 
+ */ + public ReceivePartitionStatistics( + final long rcvMsgTime, + final long msgSize, + final List parts + ) { + assert nonNull(parts); + + this.rcvMsgTime = rcvMsgTime; + this.msgSize = msgSize; + this.parts = parts; + } + } + + /** Received partition info. */ + static class PartitionStatistics { + /** Partition id. */ + private final int id; + + /** Count entries in partition. */ + private final int entryCount; + + /** + * Constructor. + * + * @param id partition id. + * @param entryCount count entries in partitions. + */ + public PartitionStatistics(final int id, final int entryCount) { + this.id = id; + this.entryCount = entryCount; + } + } + + /** + * Finds out if statistics can be printed regarding + * {@link IgniteSystemProperties#IGNITE_QUIET}, + * {@link IgniteSystemProperties#IGNITE_WRITE_REBALANCE_STATISTICS}. + * + * @return Is print statistics enabled. + */ + public static boolean printRebalanceStatistics() { + return !getBoolean(IGNITE_QUIET, true) && getBoolean(IGNITE_WRITE_REBALANCE_STATISTICS, false); + } + + /** + * Finds out if partitions distribution can be printed regarding + * {@link IgniteSystemProperties#IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS}. + * + * @return Is print partitions distribution enabled. + */ + public static boolean printPartitionsDistribution() { + return getBoolean(IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS, false); + } + + /** + * Return rebalance statistics. Required to call this method if + * {@link #printRebalanceStatistics()} == true. + *

+ * Flag {@code finish} should reflect was full rebalance finished or not. + *
+ * If {@code finish} == true then expected {@code rebFutrs} contains + * successful or not {@code RebalanceFuture} per cache group, else expected + * {@code rebFutrs} contains only one successful {@code RebalanceFuture}. + *
+ * If {@code finish} == true then print total statistics. + *

+ * Partition distribution is printed only for last success rebalance, + * per cache group. + * + * @param finish Is the whole rebalance finished or not. + * @param rebFutrs Involved in rebalance, require not null. + * @return String with printed rebalance statistics. + * @throws IgniteCheckedException Could be thrown while getting result of + * {@code RebalanceFuture}. + * @see RebalanceFuture RebalanceFuture + */ + public static String rebalanceStatistics( + final boolean finish, + final Map> rebFutrs + ) throws IgniteCheckedException { + assert nonNull(rebFutrs); + assert printRebalanceStatistics() : "Can't print statistics"; + + AtomicInteger nodeCnt = new AtomicInteger(); + + Map nodeAliases = toRebalanceFutureStream(rebFutrs) + .flatMap(future -> future.stat.msgStats.entrySet().stream()) + .flatMap(entry -> entry.getValue().keySet().stream()) + .distinct() + .collect(toMap(identity(), node -> nodeCnt.getAndIncrement())); + + StringJoiner joiner = new StringJoiner(" "); + + if (finish) + writeTotalRebalanceStatistics(rebFutrs, nodeAliases, joiner); + + writeCacheGroupsRebalanceStatistics(rebFutrs, nodeAliases, finish, joiner); + writeAliasesRebalanceStatistics("p - partitions, e - entries, b - bytes, d - duration", nodeAliases, joiner); + writePartitionsDistributionRebalanceStatistics(rebFutrs, nodeAliases, nodeCnt, joiner); + + return joiner.toString(); + } + + /** + * Write total statistics for rebalance. + * + * @param rebFutrs Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. 
+ */ + private static void writeTotalRebalanceStatistics( + final Map> rebFutrs, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(rebFutrs); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + long minStartTime = minStartTime(toRebalanceFutureStream(rebFutrs)); + long maxEndTime = maxEndTime(toRebalanceFutureStream(rebFutrs)); + + joiner.add("Total information (" + SUCCESSFUL_OR_NOT_REBALANCE_TEXT + "):") + .add("Time").add("[" + toStartEndDuration(minStartTime, maxEndTime) + "]"); + + Map> topicStat = + toTopicStatistics(toRebalanceFutureStream(rebFutrs)); + writeTopicRebalanceStatistics(topicStat, joiner); + + Map> supplierStat = + toSupplierStatistics(toRebalanceFutureStream(rebFutrs)); + writeSupplierRebalanceStatistics(supplierStat, nodeAliases, joiner); + } + + /** + * Write rebalance statistics per cache group. + *

+ * If {@code finish} == true then add {@link #SUCCESSFUL_OR_NOT_REBALANCE_TEXT} else add {@link + * #SUCCESSFUL_REBALANCE_TEXT} into header. + * + * @param rebFutrs Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. + * @param finish Is finish rebalance. + */ + private static void writeCacheGroupsRebalanceStatistics( + final Map> rebFutrs, + final Map nodeAliases, + final boolean finish, + final StringJoiner joiner + ) { + assert nonNull(rebFutrs); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + joiner.add("Information per cache group (" + + (finish ? SUCCESSFUL_OR_NOT_REBALANCE_TEXT : SUCCESSFUL_REBALANCE_TEXT) + "):"); + + rebFutrs.forEach((context, futures) -> { + long minStartTime = minStartTime(futures.stream()); + long maxEndTime = maxEndTime(futures.stream()); + + joiner.add("[id=" + context.groupId() + ",") + .add("name=" + context.cacheOrGroupName() + ",") + .add(toStartEndDuration(minStartTime, maxEndTime) + "]"); + + Map> topicStat = toTopicStatistics(futures.stream()); + writeTopicRebalanceStatistics(topicStat, joiner); + + Map> supplierStat = toSupplierStatistics(futures.stream()); + writeSupplierRebalanceStatistics(supplierStat, nodeAliases, joiner); + }); + } + + /** + * Write partitions distribution per cache group. Only for last success rebalance. + * Works if {@link #printPartitionsDistribution()} return true. + * + * @param rebFutrs Participating in successful and not rebalances, require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param nodeCnt For adding new nodes into {@code nodeAliases}, require not null. + * @param joiner For write statistics, require not null. + * @throws IgniteCheckedException When get result of + * {@link RebalanceFuture}. 
+ */ + private static void writePartitionsDistributionRebalanceStatistics( + final Map> rebFutrs, + final Map nodeAliases, + final AtomicInteger nodeCnt, + final StringJoiner joiner + ) throws IgniteCheckedException { + assert nonNull(rebFutrs); + assert nonNull(nodeAliases); + assert nonNull(nodeCnt); + assert nonNull(joiner); + + if (!printPartitionsDistribution()) + return; + + joiner.add("Partitions distribution per cache group (" + SUCCESSFUL_REBALANCE_TEXT + "):"); + + Comparator startTimeCmp = comparingLong(fut -> fut.stat.startTime); + Comparator startTimeCmpReversed = startTimeCmp.reversed(); + + Comparator partIdCmp = comparingInt(value -> value.id); + Comparator nodeAliasesCmp = comparingInt(nodeAliases::get); + + for (Entry> rebFutrsEntry : rebFutrs.entrySet()) { + CacheGroupContext cacheGrpCtx = rebFutrsEntry.getKey(); + + joiner.add("[id=" + cacheGrpCtx.groupId() + ",") + .add("name=" + cacheGrpCtx.cacheOrGroupName() + "]"); + + List successFutures = new ArrayList<>(); + + for (RebalanceFuture rebalanceFuture : rebFutrsEntry.getValue()) { + if (rebalanceFuture.isDone() && rebalanceFuture.get()) + successFutures.add(rebalanceFuture); + } + + if (successFutures.isEmpty()) + return; + + successFutures.sort(startTimeCmpReversed); + + RebalanceFuture lastSuccessFuture = successFutures.get(0); + + AffinityAssignment affinity = cacheGrpCtx.affinity().cachedAffinity(lastSuccessFuture.topologyVersion()); + + Map supplierNodeRcvParts = new TreeMap<>(partIdCmp); + + for (Entry> topicStatEntry : lastSuccessFuture.stat + .msgStats.entrySet()) { + for (Entry supplierStatEntry : topicStatEntry.getValue().entrySet()) { + for (ReceivePartitionStatistics receivePartStat : supplierStatEntry.getValue().receivePartStats) { + for (PartitionStatistics partStat : receivePartStat.parts) + supplierNodeRcvParts.put(partStat, supplierStatEntry.getKey()); + } + } + } + + affinity.nodes().forEach(node -> nodeAliases.computeIfAbsent(node, node1 -> nodeCnt.getAndIncrement())); + + 
for (Entry supplierNodeRcvPart : supplierNodeRcvParts.entrySet()) { + int partId = supplierNodeRcvPart.getKey().id; + + String nodes = affinity.get(partId).stream() + .sorted(nodeAliasesCmp) + .map(node -> "[" + nodeAliases.get(node) + + (affinity.primaryPartitions(node.id()).contains(partId) ? ",pr" : ",bu") + + (node.equals(supplierNodeRcvPart.getValue()) ? ",su" : "") + "]" + ) + .collect(joining(",")); + + joiner.add(valueOf(partId)).add("=").add(nodes); + } + } + + writeAliasesRebalanceStatistics("pr - primary, bu - backup, su - supplier node", nodeAliases, joiner); + } + + /** + * Write statistics per topic. + * + * @param topicStat Statistics by topics (in successful and not rebalances), require not null. + * @param joiner For write statistics, require not null. + */ + private static void writeTopicRebalanceStatistics( + final Map> topicStat, + final StringJoiner joiner + ) { + assert nonNull(topicStat); + assert nonNull(joiner); + + joiner.add("Topic statistics:"); + + topicStat.forEach((topicId, msgStats) -> { + long partCnt = sum(msgStats, rps -> rps.parts.size()); + long byteSum = sum(msgStats, rps -> rps.msgSize); + long entryCount = sum(msgStats, rps -> rps.parts.stream().mapToLong(ps -> ps.entryCount).sum()); + + joiner.add("[id=" + topicId + ",") + .add(toPartitionsEntriesBytes(partCnt, entryCount, byteSum) + "]"); + }); + } + + /** + * Write stattistics per supplier node. + * + * @param supplierStat Statistics by supplier (in successful and not rebalances), require not null. + * @param nodeAliases For print nodeId=1 instead long string, require not null. + * @param joiner For write statistics, require not null. 
+ */ + private static void writeSupplierRebalanceStatistics( + final Map> supplierStat, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(supplierStat); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + joiner.add("Supplier statistics:"); + + supplierStat.forEach((supplierNode, msgStats) -> { + long partCnt = sum(msgStats, rps -> rps.parts.size()); + long byteSum = sum(msgStats, rps -> rps.msgSize); + long entryCount = sum(msgStats, rps -> rps.parts.stream().mapToLong(ps -> ps.entryCount).sum()); + + long durationSum = msgStats.stream() + .flatMapToLong(msgStat -> msgStat.receivePartStats.stream() + .mapToLong(rps -> rps.rcvMsgTime - msgStat.sndMsgTime) + ) + .sum(); + + joiner.add("[nodeId=" + nodeAliases.get(supplierNode) + ",") + .add(toPartitionsEntriesBytes(partCnt, entryCount, byteSum) + ",") + .add("d=" + durationSum + " ms]"); + }); + } + + /** + * Write statistics aliases, for reducing output string. + * + * @param nodeAliases for print nodeId=1 instead long string, require not null. + * @param abbreviations Abbreviations ex. b - bytes, require not null. + * @param joiner For write statistics, require not null. + */ + private static void writeAliasesRebalanceStatistics( + final String abbreviations, + final Map nodeAliases, + final StringJoiner joiner + ) { + assert nonNull(abbreviations); + assert nonNull(nodeAliases); + assert nonNull(joiner); + + String nodes = nodeAliases.entrySet().stream() + .sorted(comparingInt(Entry::getValue)) + .map(entry -> "[" + entry.getValue() + "=" + entry.getKey().id() + "," + entry.getKey().consistentId() + "]") + .collect(joining(", ")); + + joiner.add("Aliases:").add(abbreviations + ",").add("nodeId mapping (nodeId=id,consistentId)").add(nodes); + } + + /** + * Convert time in millis to local date time. + * + * @param time Time in mills. + * @return The local date-time. 
+ */ + private static LocalDateTime toLocalDateTime(final long time) { + return new Date(time).toInstant().atZone(systemDefault()).toLocalDateTime(); + } + + /** + * Get min {@link RebalanceFutureStatistics#startTime} in stream rebalance future's. + * + * @param stream Stream rebalance future, require not null. + * @return Min start time. + */ + private static long minStartTime(final Stream stream) { + assert nonNull(stream); + + return stream.mapToLong(value -> value.stat.startTime).min().orElse(0); + } + + /** + * Get max {@link RebalanceFutureStatistics#endTime} in stream rebalance future's. + * + * @param stream Stream rebalance future's, require not null. + * @return Max end time. + */ + private static long maxEndTime(final Stream stream) { + assert nonNull(stream); + + return stream.mapToLong(value -> value.stat.endTime).max().orElse(0); + } + + /** + * Prepare stream rebalance future's of each cache groups. + * + * @param rebFutrs Rebalance future's by cache groups, require not null. + * @return Stream rebalance future's. + */ + private static Stream toRebalanceFutureStream( + final Map> rebFutrs + ) { + assert nonNull(rebFutrs); + + return rebFutrs.entrySet().stream().flatMap(entry -> entry.getValue().stream()); + } + + /** + * Aggregates statistics by topic number. + * + * @param stream Stream rebalance future's, require not null. + * @return Statistic by topics. + */ + private static Map> toTopicStatistics( + final Stream stream) { + assert nonNull(stream); + + return stream.flatMap(future -> future.stat.msgStats.entrySet().stream()) + .collect(groupingBy( + Entry::getKey, + mapping( + entry -> entry.getValue().values(), + of( + ArrayList::new, + Collection::addAll, + (ms1, ms2) -> { + ms1.addAll(ms2); + return ms1; + } + ) + ) + )); + } + + /** + * Aggregates statistics by supplier node. + * + * @param stream Stream rebalance future's, require not null. + * @return Statistic by supplier. 
+ */ + private static Map> toSupplierStatistics( + final Stream stream + ) { + assert nonNull(stream); + + return stream.flatMap(future -> future.stat.msgStats.entrySet().stream()) + .flatMap(entry -> entry.getValue().entrySet().stream()) + .collect(groupingBy(Entry::getKey, mapping(Entry::getValue, toList()))); + } + + /** + * Creates a string containing the beginning, end, and duration of the rebalance. + * + * @param start Start time in ms. + * @param end End time in ms. + * @return Formatted string of rebalance time. + * @see #REBALANCE_STATISTICS_DTF + */ + private static String toStartEndDuration(final long start, final long end) { + return "start=" + REBALANCE_STATISTICS_DTF.format(toLocalDateTime(start)) + ", end=" + + REBALANCE_STATISTICS_DTF.format(toLocalDateTime(end)) + ", d=" + (end - start) + " ms"; + } + + /** + * Summarizes long values. + * + * @param msgStats Message statistics, require not null. + * @param longExtractor Long extractor, require not null. + * @return Sum of long values. + */ + private static long sum( + final List msgStats, + final ToLongFunction longExtractor + ) { + assert nonNull(msgStats); + assert nonNull(longExtractor); + + return msgStats.stream() + .flatMap(msgStat -> msgStat.receivePartStats.stream()) + .mapToLong(longExtractor) + .sum(); + } + + /** + * Create a string containing count received partitions, + * count received entries and sum received bytes. + * + * @param parts Count received partitions. + * @param entries Count received entries. + * @param bytes Sum received bytes. + * @return Formatted string of received rebalance partitions. 
+ */ + private static String toPartitionsEntriesBytes(final long parts, final long entries, final long bytes) { + return "p=" + parts + ", e=" + entries + ", b=" + bytes; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java index 354a7851a61b1..a2fc6a5ce29b8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtLocalPartition.java @@ -162,9 +162,6 @@ public class GridDhtLocalPartition extends GridCacheConcurrentMapImpl implements * reservation is released. */ private volatile boolean delayedRenting; - /** Set if partition must be cleared in MOVING state. */ - private volatile boolean clear; - /** Set if topology update sequence should be updated on partition destroy. */ private boolean updateSeqOnDestroy; @@ -488,7 +485,7 @@ private void release0(int sizeChange) { if (reservations == 0) return; - assert getPartState(state) != EVICTED : getPartState(state); + assert getPartState(state) != EVICTED : this; long newState = setReservations(state, --reservations); newState = setSize(newState, getSize(newState) + sizeChange); @@ -498,12 +495,14 @@ private void release0(int sizeChange) { // Decrement reservations. if (this.state.compareAndSet(state, newState)) { // If no more reservations try to continue delayed renting. - if (reservations == 0 && delayedRenting) - rent(true); + if (reservations == 0) { + if (delayedRenting) + rent(true); + else if (getPartState(state) == RENTING) + tryContinueClearing(); + } - // Partition could be only reserved in OWNING state so no further actions - // are required. 
- break; + return; } } } @@ -551,6 +550,8 @@ private boolean casState(long state, GridDhtPartitionState toState) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); if (update) { + assert toState != EVICTED || reservations() == 0 : this; + try { ctx.wal().log(new PartitionMetaStateRecord(grp.groupId(), id, toState, 0)); } @@ -574,6 +575,8 @@ private boolean casState(long state, GridDhtPartitionState toState) { boolean update = this.state.compareAndSet(state, setPartState(state, toState)); if (update) { + assert toState != EVICTED || reservations() == 0 : this; + if (log.isDebugEnabled()) log.debug("Partition changed state [grp=" + grp.cacheOrGroupName() + ", p=" + id + ", prev=" + prevState + ", to=" + toState + "]"); @@ -591,7 +594,6 @@ public boolean own() { long state = this.state.get(); GridDhtPartitionState partState = getPartState(state); - if (partState == RENTING || partState == EVICTED) return false; @@ -679,18 +681,43 @@ private void clearAsync0(boolean updateSeq) { GridDhtPartitionState partState = getPartState(state); - boolean evictionRequested = partState == RENTING || delayedRenting; - boolean clearingRequested = partState == MOVING && clear; + boolean evictionRequested = partState == RENTING; + boolean clearingRequested = partState == MOVING; if (!evictionRequested && !clearingRequested) return; boolean reinitialized = clearFuture.initialize(updateSeq, evictionRequested); - // Clearing process is already running at the moment. No needs to run it again. + // Clearing process is already running at the moment. No need to run it again. if (!reinitialized) return; + // Make sure current rebalance future is finished before start clearing + // to avoid clearing currently rebalancing partition (except "initial" dummy rebalance). 
+ if (clearingRequested) { + GridDhtPartitionDemander.RebalanceFuture rebFut = + (GridDhtPartitionDemander.RebalanceFuture)grp.preloader().rebalanceFuture(); + + if (!rebFut.isInitial() && !rebFut.isDone()) { + rebFut.listen(fut -> { + // Partition could be owned after rebalance future is done. Skip clearing in such case. + // Otherwise continue clearing. + if (fut.error() == null && state() == MOVING) { + if (freeAndEmpty(state) && !grp.queriesEnabled() && !groupReserved()) { + clearFuture.finish(); + + return; + } + + ctx.evict().evictPartitionAsync(grp, GridDhtLocalPartition.this); + } + }); + + return; + } + } + // Try fast eviction. if (freeAndEmpty(state) && !grp.queriesEnabled() && !groupReserved()) { if (partState == RENTING && casState(state, EVICTED) || clearingRequested) { @@ -702,7 +729,7 @@ private void clearAsync0(boolean updateSeq) { destroy(); } - if (log.isDebugEnabled()) + if (log.isDebugEnabled() && evictionRequested) log.debug("Partition has been fast evicted [grp=" + grp.cacheOrGroupName() + ", p=" + id + ", state=" + state() + "]"); @@ -716,6 +743,9 @@ private void clearAsync0(boolean updateSeq) { /** * Initiates single clear process if partition is in MOVING state or continues cleaning for RENTING state. * Method does nothing if clear process is already running. + * + * IMPORTANT: if clearing is required when after return from method call clear future must be initialized. + * This enforces clearing happens before sending demand requests. */ public void clearAsync() { GridDhtPartitionState state0 = state(); @@ -723,18 +753,7 @@ public void clearAsync() { if (state0 != MOVING && state0 != RENTING) return; - clear = true; - - GridDhtPartitionDemander.RebalanceFuture rebFut = - (GridDhtPartitionDemander.RebalanceFuture)grp.preloader().rebalanceFuture(); - - // Make sure current rebalance future finishes before clearing - // to avoid clearing currently rebalancing partition. - // NOTE: this invariant is not true for initial rebalance future. 
- if (rebFut.topologyVersion() != null && state0 == MOVING && !rebFut.isDone()) - rebFut.listen(fut -> clearAsync0(false)); - else - clearAsync0(false); + clearAsync0(false); } /** @@ -827,7 +846,10 @@ private void finishEviction(boolean updateSeq) { GridDhtPartitionState state = getPartState(state0); - if (state == EVICTED || (freeAndEmpty(state0) && state == RENTING && casState(state0, EVICTED))) + // Some entries still might be present in partition cache maps due to concurrent updates on backup nodes, + // but it's safe to finish eviction because no physical updates are possible. + if (state == EVICTED || + (store.isEmpty() && getReservations(state0) == 0 && state == RENTING && casState(state0, EVICTED))) updateSeqOnDestroy = updateSeq; } @@ -1133,8 +1155,8 @@ private long clearAll(EvictionContext evictionCtx) throws NodeStoppingException CacheDataRow row = it0.next(); // Do not clear fresh rows in case of partition reloading. - // This is required because updates are possible to moving partition which is currently cleared. - if (row.version().compareTo(clearVer) >= 0 && (state() == MOVING && clear)) + // This is required because normal updates are possible to moving partition which is currently cleared. + if (row.version().compareTo(clearVer) >= 0 && state() == MOVING) continue; if (grp.sharedGroup() && (hld == null || hld.cctx.cacheId() != row.cacheId())) @@ -1302,6 +1324,7 @@ private void clearDeferredDeletes() { "reservations", reservations(), "empty", isEmpty(), "createTime", U.format(createTime), + "fullSize", fullSize(), "cntr", dataStore().partUpdateCounter()); } @@ -1438,7 +1461,9 @@ public GridLongList finalizeUpdateCounters() { } /** - * @param last {@code True} is last batch for partition. + * Called before next batch is about to be applied during rebalance. Currently used for tests. + * + * @param last {@code True} if last batch for partition. */ public void beforeApplyBatch(boolean last) { // No-op. 
@@ -1579,8 +1604,6 @@ private void registerClearingCallback() { // Recreate cache data store in case of allowed fast eviction, and reset clear flag. listen(f -> { - clear = false; - clearingCbRegistered = false; }); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java index d51f053597857..6895e2a16a43b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/GridDhtPartitionTopologyImpl.java @@ -75,6 +75,7 @@ import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_DATA_LOST; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; +import static org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture.ExchangeType.ALL; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.EVICTED; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.LOST; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.MOVING; @@ -764,58 +765,62 @@ private boolean partitionLocalNode(int p, AffinityTopologyVersion topVer) { long updateSeq = this.updateSeq.incrementAndGet(); - for (int p = 0; p < partitions; p++) { - GridDhtLocalPartition locPart = localPartition0(p, topVer, false, true); + // Skip partition updates in case of not real exchange. 
+ if (!ctx.localNode().isClient() && exchFut.exchangeType() == ALL) { + for (int p = 0; p < partitions; p++) { + GridDhtLocalPartition locPart = localPartition0(p, topVer, false, true); - if (partitionLocalNode(p, topVer)) { - // Prepare partition to rebalance if it's not happened on full map update phase. - if (locPart == null || locPart.state() == RENTING || locPart.state() == EVICTED) - locPart = rebalancePartition(p, true, exchFut); + if (partitionLocalNode(p, topVer)) { + // Prepare partition to rebalance if it's not happened on full map update phase. + if (locPart == null || locPart.state() == RENTING || locPart.state() == EVICTED) + locPart = rebalancePartition(p, true, exchFut); - GridDhtPartitionState state = locPart.state(); + GridDhtPartitionState state = locPart.state(); - if (state == MOVING) { - if (grp.rebalanceEnabled()) { - Collection owners = owners(p); + if (state == MOVING) { + if (grp.rebalanceEnabled()) { + Collection owners = owners(p); + + // If an owner node left during exchange, then new exchange should be started with detecting lost partitions. + if (!F.isEmpty(owners)) { + if (log.isDebugEnabled()) + log.debug("Will not own partition (there are owners to rebalance from) " + + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", owners = " + owners + ']'); + } - // If an owner node left during exchange, then new exchange should be started with detecting lost partitions. - if (!F.isEmpty(owners)) { - if (log.isDebugEnabled()) - log.debug("Will not own partition (there are owners to rebalance from) " + - "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", owners = " + owners + ']'); + // It's important to clear non empty moving partitions before full rebalancing. + // Consider the scenario: + // Node1 has keys k1 and k2 in the same partition. + // Node2 started rebalancing from Node1. + // Node2 received k1, k2 and failed before moving partition to OWNING state. 
+ // Node1 removes k2 but update has not been delivered to Node1 because of failure. + // After new full rebalance Node1 will only send k1 to Node2 causing lost removal. + // NOTE: avoid calling clearAsync for partition twice per topology version. + if (grp.persistenceEnabled() && + exchFut.isClearingPartition(grp, locPart.id()) && + !locPart.isClearing() && + !locPart.isEmpty()) + locPart.clearAsync(); } - - // It's important to clear non empty moving partitions before full rebalancing. - // Consider the scenario: - // Node1 has keys k1 and k2 in the same partition. - // Node2 started rebalancing from Node1. - // Node2 received k1, k2 and failed before moving partition to OWNING state. - // Node1 removes k2 but update has not been delivered to Node1 because of failure. - // After new full rebalance Node1 will only send k1 to Node2 causing lost removal. - // NOTE: avoid calling clearAsync for partition twice per topology version. - // TODO FIXME clearing is not always needed see IGNITE-11799 - if (grp.persistenceEnabled() && !exchFut.isHistoryPartition(grp, locPart.id()) && - !locPart.isClearing() && !locPart.isEmpty()) - locPart.clearAsync(); + else + updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); } - else - updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); } - } - else { - if (locPart != null) { - GridDhtPartitionState state = locPart.state(); + else { + if (locPart != null) { + GridDhtPartitionState state = locPart.state(); - if (state == MOVING) { - locPart.rent(false); + if (state == MOVING) { + locPart.rent(false); - updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); + updateSeq = updateLocal(p, locPart.state(), updateSeq, topVer); - changed = true; + changed = true; - if (log.isDebugEnabled()) { - log.debug("Evicting " + state + " partition (it does not belong to affinity) [" + - "grp=" + grp.cacheOrGroupName() + ", p=" + locPart.id() + ']'); + if (log.isDebugEnabled()) { + log.debug("Evicting " + state + " 
partition (it does not belong to affinity) [" + + "grp=" + grp.cacheOrGroupName() + ", p=" + locPart.id() + ']'); + } } } } @@ -867,7 +872,7 @@ private boolean partitionLocalNode(int p, AffinityTopologyVersion topVer) { * @param p Partition number. * @return Partition. */ - private GridDhtLocalPartition getOrCreatePartition(int p) { + public GridDhtLocalPartition getOrCreatePartition(int p) { assert lock.isWriteLockedByCurrentThread(); assert ctx.database().checkpointLockIsHeldByThread(); @@ -1587,9 +1592,8 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD if (exchangeVer != null && nodeMap != null && grp.persistenceEnabled() && - readyTopVer.initialized()) { - - assert exchFut != null; + readyTopVer.initialized() && + exchFut != null) { for (Map.Entry e : nodeMap.entrySet()) { int p = e.getKey(); @@ -1609,7 +1613,10 @@ private boolean shouldOverridePartitionMap(GridDhtPartitionMap currentMap, GridD } } else if (state == MOVING) { - rebalancePartition(p, partsToReload.contains(p), exchFut); + GridDhtLocalPartition locPart = locParts.get(p); + + rebalancePartition(p, partsToReload.contains(p) || + locPart != null && locPart.state() == MOVING && exchFut.localJoinExchange(), exchFut); changed = true; } @@ -2234,6 +2241,8 @@ else if (plc != PartitionLossPolicy.IGNORE) { ctx.database().checkpointReadLock(); try { + Map> addToWaitGroups = new HashMap<>(); + lock.writeLock().lock(); try { @@ -2265,7 +2274,7 @@ else if (plc != PartitionLossPolicy.IGNORE) { GridDhtPartitionState state = partMap.get(part); - if (state == null || state != OWNING) + if (state != OWNING) continue; if (!newOwners.contains(remoteNodeId)) { @@ -2285,9 +2294,7 @@ else if (plc != PartitionLossPolicy.IGNORE) { UUID nodeId = entry.getKey(); Set rebalancedParts = entry.getValue(); - // Add to wait groups to ensure late assignment switch after all partitions are rebalanced. 
- for (Integer part : rebalancedParts) - ctx.cache().context().affinity().addToWaitGroup(groupId(), part, nodeId, topologyVersionFuture().initialVersion()); + addToWaitGroups.put(nodeId, new HashSet<>(rebalancedParts)); if (!rebalancedParts.isEmpty()) { Set historical = rebalancedParts.stream() @@ -2306,9 +2313,22 @@ else if (plc != PartitionLossPolicy.IGNORE) { } node2part = new GridDhtPartitionFullMap(node2part, updateSeq.incrementAndGet()); - } finally { + } + finally { lock.writeLock().unlock(); } + + for (Map.Entry> entry : addToWaitGroups.entrySet()) { + // Add to wait groups to ensure late assignment switch after all partitions are rebalanced. + for (Integer part : entry.getValue()) { + ctx.cache().context().affinity().addToWaitGroup( + groupId(), + part, + entry.getKey(), + topologyVersionFuture().initialVersion() + ); + } + } } finally { ctx.database().checkpointReadUnlock(); @@ -2346,8 +2366,8 @@ private GridDhtLocalPartition rebalancePartition(int p, boolean clear, GridDhtPa if (part.state() != MOVING) part.moving(); - if (!clear) - exchFut.addHistoryPartition(grp, part.id()); + if (clear) + exchFut.addClearingPartition(grp, part.id()); assert part.state() == MOVING : part; @@ -2624,14 +2644,14 @@ private void removeNode(UUID nodeId) { @Override public void ownMoving(AffinityTopologyVersion rebFinishedTopVer) { lock.writeLock().lock(); - AffinityTopologyVersion lastAffChangeVer = ctx.exchange().lastAffinityChangedTopologyVersion(lastTopChangeVer); + try { + AffinityTopologyVersion lastAffChangeVer = ctx.exchange().lastAffinityChangedTopologyVersion(lastTopChangeVer); - if (lastAffChangeVer.compareTo(rebFinishedTopVer) > 0) - log.info("Affinity topology changed, no MOVING partitions will be owned " + - "[rebFinishedTopVer=" + rebFinishedTopVer + - ", lastAffChangeVer=" + lastAffChangeVer + "]"); + if (lastAffChangeVer.compareTo(rebFinishedTopVer) > 0 && log.isInfoEnabled()) + log.info("Affinity topology changed, no MOVING partitions will be owned " + + 
"[rebFinishedTopVer=" + rebFinishedTopVer + + ", lastAffChangeVer=" + lastAffChangeVer + "]"); - try { for (GridDhtLocalPartition locPart : grp.topology().currentLocalPartitions()) { if (locPart.state() == MOVING) { boolean reserved = locPart.reserve(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java index 826902cee1a87..6d16acc20d021 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/dht/topology/PartitionsEvictManager.java @@ -245,7 +245,8 @@ private void showProgress() { if (threads == 0) threads = permits = 1; - log.info("Evict partition permits=" + permits); + if (log.isInfoEnabled()) + log.info("Evict partition permits=" + permits); evictionQueue = new BucketQueue(threads); } @@ -349,8 +350,9 @@ private void awaitFinishAll(){ private void awaitFinish(Integer part, IgniteInternalFuture fut) { // Wait for last offered partition eviction completion try { - log.info("Await partition evict, grpName=" + grp.cacheOrGroupName() + - ", grpId=" + grp.groupId() + ", partId=" + part); + if (log.isInfoEnabled()) + log.info("Await partition evict, grpName=" + grp.cacheOrGroupName() + + ", grpId=" + grp.groupId() + ", partId=" + part); fut.get(); } @@ -369,7 +371,7 @@ private void showProgress() { ", grpId=" + grp.groupId() + ", remainingPartsToEvict=" + (totalTasks.get() - taskInProgress) + ", partsEvictInProgress=" + taskInProgress + - ", totalParts= " + grp.topology().localPartitions().size() + "]"); + ", totalParts=" + grp.topology().localPartitions().size() + "]"); } } @@ -412,12 +414,8 @@ private PartitionEvictionTask( } try { - assert part.state() != GridDhtPartitionState.OWNING : 
part; - boolean success = part.tryClear(grpEvictionCtx); - assert part.state() != GridDhtPartitionState.OWNING : part; - if (success) { if (part.state() == GridDhtPartitionState.EVICTED && part.markForDestroy()) part.destroy(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java index 7e85e0592eaf8..ba76636815be1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticSerializableTxPrepareFuture.java @@ -42,6 +42,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.transactions.IgniteTxOptimisticCheckedException; +import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -52,6 +53,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P1; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiInClosure; @@ -963,6 +965,14 @@ void onResult(final GridNearTxPrepareResponse res, boolean updateMapping) { * @param res Response. 
*/ private void remap(final GridNearTxPrepareResponse res) { + if (parent.tx.isRollbackOnly()) { + onDone(new IgniteTxRollbackCheckedException( + "Failed to prepare the transaction, due to the transaction is marked as rolled back " + + "[tx=" + CU.txString(parent.tx) + ']')); + + return; + } + parent.prepareOnTopology(true, new Runnable() { @Override public void run() { onDone(res); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java index 8e10ad3ac5cd1..51e7b9e382029 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFuture.java @@ -1014,6 +1014,14 @@ void onResult(final GridNearTxPrepareResponse res) { * */ private void remap() { + if (parent.tx.isRollbackOnly()) { + onDone(new IgniteTxRollbackCheckedException( + "Failed to prepare the transaction, due to the transaction is marked as rolled back " + + "[tx=" + CU.txString(parent.tx) + ']')); + + return; + } + parent.prepareOnTopology(true, new Runnable() { @Override public void run() { onDone((GridNearTxPrepareResponse) null); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java index 747013cffdd40..406139141aaf3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearOptimisticTxPrepareFutureAdapter.java @@ -23,11 +23,12 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey; +import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; -import org.apache.ignite.lang.IgniteInClosure; import org.jetbrains.annotations.Nullable; /** @@ -136,6 +137,14 @@ protected final void prepareOnTopology(final boolean remap, @Nullable final Runn return; } + if (tx.isRollbackOnly()) { + onDone(new IgniteTxRollbackCheckedException( + "Failed to prepare the transaction, due to the transaction is marked as rolled back " + + "[tx=" + CU.txString(tx) + ']')); + + return; + } + prepare0(remap, false); if (c != null) @@ -147,6 +156,14 @@ protected final void prepareOnTopology(final boolean remap, @Nullable final Runn return; try { + if (tx.isRollbackOnly()) { + onDone(new IgniteTxRollbackCheckedException( + "Failed to prepare the transaction, due to the transaction is marked as rolled back " + + "[tx=" + CU.txString(tx) + ']')); + + return; + } + prepareOnTopology(remap, c); } finally { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java index 36187e34803b2..f6619a334d17c 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/distributed/near/GridNearTxLocal.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal.processors.cache.distributed.near; import java.io.Externalizable; +import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; +import java.util.Date; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -28,6 +30,8 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReferenceFieldUpdater; import javax.cache.Cache; import javax.cache.CacheException; @@ -64,6 +68,7 @@ import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxKey; +import org.apache.ignite.internal.processors.cache.transactions.IgniteTxManager; import org.apache.ignite.internal.processors.cache.transactions.TransactionProxy; import org.apache.ignite.internal.processors.cache.transactions.TransactionProxyImpl; import org.apache.ignite.internal.processors.cache.transactions.TransactionProxyRollbackOnlyImpl; @@ -73,6 +78,7 @@ import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException; import org.apache.ignite.internal.util.GridLeanMap; +import org.apache.ignite.internal.util.GridStringBuilder; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridEmbeddedFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; @@ -126,6 +132,14 @@ public class 
GridNearTxLocal extends GridDhtTxLocalAdapter implements GridTimeou /** */ private static final long serialVersionUID = 0L; + /** Histogram buckets for metrics of system and user time. */ + public static final long[] METRIC_TIME_BUCKETS = + new long[] { 1, 2, 4, 8, 16, 25, 50, 75, 100, 250, 500, 750, 1000, 3000, 5000, 10000, 25000, 60000}; + + /** */ + private static final ThreadLocal TIME_FORMAT = + ThreadLocal.withInitial(() -> new SimpleDateFormat("HH:mm:ss.SSS")); + /** Prepare future updater. */ private static final AtomicReferenceFieldUpdater PREP_FUT_UPD = AtomicReferenceFieldUpdater.newUpdater(GridNearTxLocal.class, IgniteInternalFuture.class, "prepFut"); @@ -168,6 +182,40 @@ public class GridNearTxLocal extends GridDhtTxLocalAdapter implements GridTimeou /** */ private boolean trackTimeout; + /** + * Counts how much time this transaction has spent on system calls, in nanoseconds. + */ + private final AtomicLong systemTime = new AtomicLong(0); + + /** + * Stores the nano time value when current system time has started, or 0 if no system section + * is running currently. + */ + private final AtomicLong systemStartTime = new AtomicLong(0); + + /** + * Stores the nano time value when prepare step has started, or 0 if no prepare step + * has started yet. + */ + private final AtomicLong prepareStartTime = new AtomicLong(0); + + /** + * Stores prepare step duration, or 0 if it has not finished yet. + */ + private final AtomicLong prepareTime = new AtomicLong(0); + + /** + * Stores the nano time value when commit or rollback step has started, or 0 if it + * has not started yet. + */ + private final AtomicLong commitOrRollbackStartTime = new AtomicLong(0); + + /** Stores commit or rollback step duration, or 0 if it has not finished yet. 
*/ + private final AtomicLong commitOrRollbackTime = new AtomicLong(0); + + /** */ + private IgniteTxManager.TxDumpsThrottling txDumpsThrottling; + /** */ @GridToStringExclude private TransactionProxyImpl proxy; @@ -200,6 +248,7 @@ public GridNearTxLocal() { * @param subjId Subject ID. * @param taskNameHash Task name hash code. * @param lb Label. + * @param txDumpsThrottling Log throttling information. */ public GridNearTxLocal( GridCacheSharedContext ctx, @@ -214,7 +263,8 @@ public GridNearTxLocal( int txSize, @Nullable UUID subjId, int taskNameHash, - @Nullable String lb + @Nullable String lb, + IgniteTxManager.TxDumpsThrottling txDumpsThrottling ) { super( ctx, @@ -237,6 +287,8 @@ public GridNearTxLocal( mappings = implicitSingle ? new IgniteTxMappingsSingleImpl() : new IgniteTxMappingsImpl(); + this.txDumpsThrottling = txDumpsThrottling; + initResult(); trackTimeout = timeout() > 0 && !implicit() && cctx.time().addTimeoutObject(this); @@ -3235,10 +3287,131 @@ private void readyNearLock(IgniteTxEntry txEntry, return true; } + /** + * Returns current amount of time that transaction has spent on system activities (acquiring locks, commiting, + * rolling back, etc.) + * + * @return Amount of time in milliseconds. + */ + public long systemTimeCurrent() { + long systemTime0 = systemTime.get(); + + long systemStartTime0 = systemStartTime.get(); + + long t = systemStartTime0 == 0 ? 
0 : (System.nanoTime() - systemStartTime0); + + return U.nanosToMillis(systemTime0 + t); + } + + /** {@inheritDoc} */ + @Override public boolean state(TransactionState state) { + boolean res = super.state(state); + + if (state == COMMITTED || state == ROLLED_BACK) { + leaveSystemSection(); + + //if commitOrRollbackTime != 0 it means that we already have written metrics and dumped it in log at least once + if (!commitOrRollbackTime.compareAndSet(0, System.nanoTime() - commitOrRollbackStartTime.get())) + return res; + + long systemTimeMillis = U.nanosToMillis(this.systemTime.get()); + long totalTimeMillis = System.currentTimeMillis() - startTime(); + + //in some cases totalTimeMillis can be less than systemTimeMillis, as they are calculated with different precision + long userTimeMillis = Math.max(totalTimeMillis - systemTimeMillis, 0); + + writeTxMetrics(systemTimeMillis, userTimeMillis); + + boolean willBeSkipped = txDumpsThrottling == null || txDumpsThrottling.skipCurrent(); + + if (!willBeSkipped) { + long transactionTimeDumpThreshold = cctx.tm().longTransactionTimeDumpThreshold(); + + double transactionTimeDumpSamplesCoefficient = cctx.tm().transactionTimeDumpSamplesCoefficient(); + + boolean isLong = transactionTimeDumpThreshold > 0 && totalTimeMillis > transactionTimeDumpThreshold; + + boolean randomlyChosen = transactionTimeDumpSamplesCoefficient > 0.0 + && ThreadLocalRandom.current().nextDouble() <= transactionTimeDumpSamplesCoefficient; + + if (randomlyChosen || isLong) { + String txDump = completedTransactionDump(state, systemTimeMillis, userTimeMillis, isLong); + + if (isLong) + log.warning(txDump); + else + log.info(txDump); + + txDumpsThrottling.dump(); + } + } + else if (txDumpsThrottling != null) + txDumpsThrottling.skip(); + } + + return res; + } + + /** + * Builds dump string for completed transaction. + * + * @param state Transaction state. + * @param systemTimeMillis System time in milliseconds. + * @param userTimeMillis User time in milliseconds. 
+ * @param isLong Whether the dumped transaction is long running or not. + * @return Dump string. + */ + private String completedTransactionDump( + TransactionState state, + long systemTimeMillis, + long userTimeMillis, + boolean isLong + ) { + long cacheOperationsTimeMillis = + U.nanosToMillis(systemTime.get() - prepareTime.get() - commitOrRollbackTime.get()); + + GridStringBuilder warning = new GridStringBuilder(isLong ? "Long transaction time dump " : "Transaction time dump ") + .a("[startTime=") + .a(TIME_FORMAT.get().format(new Date(startTime))) + .a(", totalTime=") + .a(systemTimeMillis + userTimeMillis) + .a(", systemTime=") + .a(systemTimeMillis) + .a(", userTime=") + .a(userTimeMillis) + .a(", cacheOperationsTime=") + .a(cacheOperationsTimeMillis); + + if (state == COMMITTED) { + warning + .a(", prepareTime=") + .a(timeMillis(prepareTime)) + .a(", commitTime=") + .a(timeMillis(commitOrRollbackTime)); + } + else { + warning + .a(", rollbackTime=") + .a(timeMillis(commitOrRollbackTime)); + } + + warning + .a(", tx=") + .a(this) + .a("]"); + + return warning.toString(); + } + /** * @return Tx prepare future. */ public IgniteInternalFuture prepareNearTxLocal() { + enterSystemSection(); + + //we assume that prepare start time should be set only once for the transaction + prepareStartTime.compareAndSet(0, System.nanoTime()); + GridNearTxPrepareFutureAdapter fut = (GridNearTxPrepareFutureAdapter)prepFut; if (fut == null) { @@ -3352,6 +3525,11 @@ public IgniteInternalFuture commitNearTxLocalAsync() { prepareFut.listen(new CI1>() { @Override public void apply(IgniteInternalFuture f) { + //these values should not be changed after set once + prepareTime.compareAndSet(0, System.nanoTime() - prepareStartTime.get()); + + commitOrRollbackStartTime.compareAndSet(0, System.nanoTime()); + try { // Make sure that here are no exceptions. 
f.get(); @@ -3410,6 +3588,8 @@ public IgniteInternalFuture rollbackNearTxLocalAsync(final boo if (log.isDebugEnabled()) log.debug("Rolling back near tx: " + this); + enterSystemSection(); + if (!onTimeout && trackTimeout) removeTimeoutHandler(); @@ -4331,6 +4511,44 @@ public boolean addTimeoutHandler() { } } + /** */ + private long timeMillis(AtomicLong atomicNanoTime) { + return U.nanosToMillis(atomicNanoTime.get()); + } + + /** + * Enters the section when system time for this transaction is counted. + */ + public void enterSystemSection() { + //setting systemStartTime only if it equals 0, otherwise it means that we are already in system section + //and sould do nothing. + systemStartTime.compareAndSet(0, System.nanoTime()); + } + + /** + * Leaves the section when system time for this transaction is counted. + */ + public void leaveSystemSection() { + long systemStartTime0 = systemStartTime.getAndSet(0); + + if (systemStartTime0 > 0) + systemTime.addAndGet(System.nanoTime() - systemStartTime0); + } + + /** + * Writes system and user time metrics. + * + * @param systemTime System time. + * @param userTime User time. + */ + private void writeTxMetrics(long systemTime, long userTime) { + if (systemTime > 0) + cctx.txMetrics().writeTxSystemTime(systemTime); + + if (userTime > 0) + cctx.txMetrics().writeTxUserTime(userTime); + } + /** * Post-lock closure. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java index 33a52a11a03d1..f2a4b30c4af82 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridCacheDrManager.java @@ -60,10 +60,9 @@ public void replicate(KeyCacheObject key, * * @param topVer Topology version. 
* @param left {@code True} if exchange has been caused by node leave. - * @param activate {@code True} if exchange has been caused by cluster activation. * @throws IgniteCheckedException If failed. */ - public void onExchange(AffinityTopologyVersion topVer, boolean left, boolean activate) throws IgniteCheckedException; + public void onExchange(AffinityTopologyVersion topVer, boolean left) throws IgniteCheckedException; /** * @return {@code True} is DR is enabled. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java index 425e79c536344..f3c1b23f7c7d6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/dr/GridOsCacheDrManager.java @@ -78,7 +78,7 @@ public class GridOsCacheDrManager implements GridCacheDrManager { } /** {@inheritDoc} */ - @Override public void onExchange(AffinityTopologyVersion topVer, boolean left, boolean activate) throws IgniteCheckedException { + @Override public void onExchange(AffinityTopologyVersion topVer, boolean left) throws IgniteCheckedException { // No-op. } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java index e26174a2adef5..65f7c400330bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/local/GridLocalCacheEntry.java @@ -199,8 +199,9 @@ void readyLocal(GridCacheMvccCandidate cand) { /** * Rechecks if lock should be reassigned. 
+ * @return owner */ - public void recheck() { + public CacheLockCandidates recheck(GridCacheMvccCandidate checkingCandidate) { CacheObject val; CacheLockCandidates prev = null; CacheLockCandidates owner = null; @@ -225,7 +226,9 @@ public void recheck() { unlockEntry(); } - checkOwnerChanged(prev, owner, val); + checkOwnerChanged(prev, owner, val, checkingCandidate); + + return owner; } /** {@inheritDoc} */ @@ -248,10 +251,14 @@ public void recheck() { // At this point candidate may have been removed and entry destroyed, // so we check for null. - if (e != null) - e.recheck(); - - break; + if (e != null) { + CacheLockCandidates newOwner = e.recheck(owner); + if(newOwner == null || !newOwner.hasCandidate(cand.version())) + // the lock from the chain hasn't been acquired, no sense to check the rest of the chain + break; + } + else + break; } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java index a566ae90b53eb..2a33ac18e58ad 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheDatabaseSharedManager.java @@ -148,11 +148,11 @@ import org.apache.ignite.internal.processors.query.GridQueryProcessor; import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; import org.apache.ignite.internal.util.GridConcurrentHashSet; -import org.apache.ignite.internal.util.GridCountDownCallback; import org.apache.ignite.internal.util.GridMultiCollectionWrapper; import org.apache.ignite.internal.util.GridReadOnlyArrayView; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import 
org.apache.ignite.internal.util.future.CountDownFuture; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -194,7 +194,11 @@ import static org.apache.ignite.internal.LongJVMPauseDetector.DEFAULT_JVM_PAUSE_DETECTOR_THRESHOLD; import static org.apache.ignite.internal.pagemem.PageIdUtils.partId; import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.CHECKPOINT_RECORD; +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.TMP_FILE_MATCHER; import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.fromOrdinal; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getType; +import static org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO.getVersion; +import static org.apache.ignite.internal.util.IgniteUtils.hexLong; /** * @@ -207,6 +211,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ public static final String IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP = "IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP"; + /** Log read lock holders. */ + public static final String IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS = "IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS"; + /** MemoryPolicyConfiguration name reserved for meta store. */ private static final String METASTORE_DATA_REGION_NAME = "metastoreMemPlc"; @@ -235,6 +242,9 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** */ private final boolean skipCheckpointOnNodeStop = getBoolean(IGNITE_PDS_SKIP_CHECKPOINT_ON_NODE_STOP, false); + /** */ + private final boolean logReadLockHolders = getBoolean(IGNITE_PDS_LOG_CP_READ_LOCK_HOLDERS); + /** * Starting from this number of dirty pages in checkpoint, array will be sorted with * {@link Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. 
@@ -300,7 +310,7 @@ public class GridCacheDatabaseSharedManager extends IgniteCacheDatabaseSharedMan /** For testing only. */ private volatile GridFutureAdapter enableChangeApplied; - /** */ + /** Checkpont lock. */ ReentrantReadWriteLock checkpointLock = new ReentrantReadWriteLock(); /** */ @@ -538,6 +548,9 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi final GridKernalContext kernalCtx = cctx.kernalContext(); + if (logReadLockHolders) + checkpointLock = new U.ReentrantReadWriteLockTracer(checkpointLock, kernalCtx, 5_000); + if (!kernalCtx.clientNode()) { checkpointer = new Checkpointer(cctx.igniteInstanceName(), "db-checkpoint-thread", log); @@ -574,10 +587,7 @@ private DataRegionConfiguration createMetastoreDataRegionConfig(DataStorageConfi */ public void cleanupTempCheckpointDirectory() throws IgniteCheckedException { try { - try (DirectoryStream files = Files.newDirectoryStream( - cpDir.toPath(), - path -> path.endsWith(FilePageStoreManager.TMP_SUFFIX)) - ) { + try (DirectoryStream files = Files.newDirectoryStream(cpDir.toPath(), TMP_FILE_MATCHER::matches)) { for (Path path : files) Files.delete(path); } @@ -976,7 +986,7 @@ private void finishRecovery() throws IgniteCheckedException { long time = System.currentTimeMillis(); - checkpointReadLock(); + CHECKPOINT_LOCK_HOLD_COUNT.set(CHECKPOINT_LOCK_HOLD_COUNT.get() + 1); try { for (DatabaseLifecycleListener lsnr : getDatabaseListeners(cctx.kernalContext())) @@ -1008,7 +1018,7 @@ private void finishRecovery() throws IgniteCheckedException { throw e; } finally { - checkpointReadUnlock(); + CHECKPOINT_LOCK_HOLD_COUNT.set(CHECKPOINT_LOCK_HOLD_COUNT.get() - 1); } } @@ -1579,8 +1589,8 @@ private void prepareIndexRebuildFuture(int cacheId) { CacheConfiguration ccfg = cacheCtx.config(); - if (ccfg != null) { - log().info("Finished indexes rebuilding for cache [name=" + ccfg.getName() + if (ccfg != null && log.isInfoEnabled()) { + log.info("Finished indexes rebuilding for cache [name=" 
+ ccfg.getName() + ", grpName=" + ccfg.getGroupName() + ']'); } } @@ -2110,7 +2120,7 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC } /** {@inheritDoc} */ - @Override public void startMemoryRestore(GridKernalContext kctx) throws IgniteCheckedException { + @Override public void startMemoryRestore(GridKernalContext kctx, TimeBag startTimer) throws IgniteCheckedException { if (kctx.clientNode()) return; @@ -2120,6 +2130,8 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC // Preform early regions startup before restoring state. initAndStartRegions(kctx.config().getDataStorageConfiguration()); + startTimer.finishGlobalStage("Init and start regions"); + for (DatabaseLifecycleListener lsnr : getDatabaseListeners(kctx)) lsnr.beforeBinaryMemoryRestore(this); @@ -2141,6 +2153,8 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC dumpPartitionsInfo(cctx, log); } + startTimer.finishGlobalStage("Restore binary memory"); + for (DatabaseLifecycleListener lsnr : getDatabaseListeners(kctx)) lsnr.afterBinaryMemoryRestore(this); @@ -2168,6 +2182,8 @@ private WALPointer readPointer(File cpMarkerFile, ByteBuffer buf) throws IgniteC dumpPartitionsInfo(cctx, log); } + startTimer.finishGlobalStage("Restore logical state"); + walTail = tailPointer(logicalState.lastRead); cctx.wal().onDeActivate(kctx); @@ -2261,8 +2277,9 @@ private WALPointer performBinaryMemoryRestore( WALPointer cpMark = ((CheckpointRecord)rec).checkpointMark(); if (cpMark != null) { - log.info("Restoring checkpoint after logical recovery, will start physical recovery from " + - "back pointer: " + cpMark); + if (log.isInfoEnabled()) + log.info("Restoring checkpoint after logical recovery, will start physical recovery from " + + "back pointer: " + cpMark); recPtr = cpMark; } @@ -2379,7 +2396,7 @@ private WALPointer performBinaryMemoryRestore( stripedApplyPage((pageMem) -> { try { - applyPageDelta(pageMem, pageDelta); + 
applyPageDelta(pageMem, pageDelta, true); applied.incrementAndGet(); } @@ -2407,10 +2424,12 @@ private WALPointer performBinaryMemoryRestore( "on disk, but checkpoint record is missed in WAL) " + "[cpStatus=" + status + ", lastRead=" + lastReadPtr + "]"); - log.info("Finished applying memory changes [changesApplied=" + applied + - ", time=" + (U.currentTimeMillis() - start) + " ms]"); + if (log.isInfoEnabled()) + log.info("Finished applying memory changes [changesApplied=" + applied + + ", time=" + (U.currentTimeMillis() - start) + " ms]"); - assert applied.get() > 0; + //Error in backport GG-17348 to 2.5.X branch. + //assert applied.get() > 0; finalizeCheckpointOnRecovery(status.cpStartTs, status.cpStartId, status.startPtr, exec); } @@ -2575,24 +2594,25 @@ public void applyPageSnapshot(PageMemoryEx pageMem, PageSnapshot pageSnapshotRec /** * @param pageMem Page memory. * @param pageDeltaRecord Page delta record. + * @param restore Get page for restore. * @throws IgniteCheckedException If failed. */ - private void applyPageDelta(PageMemoryEx pageMem, PageDeltaRecord pageDeltaRecord) throws IgniteCheckedException { + private void applyPageDelta(PageMemoryEx pageMem, PageDeltaRecord pageDeltaRecord, boolean restore) throws IgniteCheckedException { int grpId = pageDeltaRecord.groupId(); long pageId = pageDeltaRecord.pageId(); // Here we do not require tag check because we may be applying memory changes after // several repetitive restarts and the same pages may have changed several times. 
- long page = pageMem.acquirePage(grpId, pageId, IoStatisticsHolderNoOp.INSTANCE, true); + long page = pageMem.acquirePage(grpId, pageId, IoStatisticsHolderNoOp.INSTANCE, restore); try { - long pageAddr = pageMem.writeLock(grpId, pageId, page, true); + long pageAddr = pageMem.writeLock(grpId, pageId, page, restore); try { pageDeltaRecord.applyDelta(pageMem, pageAddr); } finally { - pageMem.writeUnlock(grpId, pageId, page, null, true, true); + pageMem.writeUnlock(grpId, pageId, page, null, true, restore); } } finally { @@ -2840,7 +2860,7 @@ private RestoreLogicalState applyLogicalUpdates( stripedApplyPage((pageMem) -> { try { - applyPageDelta(pageMem, pageDelta); + applyPageDelta(pageMem, pageDelta, false); } catch (IgniteCheckedException e) { U.error(log, "Failed to apply page delta, " + pageDelta); @@ -2986,6 +3006,19 @@ private void finalizeCheckpointOnRecovery( int innerIdx = i; exec.execute(stripeIdx, () -> { + PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> { + assert tag != PageMemoryImpl.TRY_AGAIN_TAG : "Lock is held by other thread for page " + fullPageId; + + int groupId = fullPageId.groupId(); + long pageId = fullPageId.pageId(); + + // Write buf to page store. + PageStore store = storeMgr.writeInternal(groupId, pageId, buf, tag, true); + + // Save store for future fsync. + updStores.add(store); + }; + // Local buffer for write pages. ByteBuffer writePageBuf = ByteBuffer.allocateDirect(pageSize()); @@ -2993,7 +3026,7 @@ private void finalizeCheckpointOnRecovery( Collection pages0 = pages.innerCollection(innerIdx); - FullPageId pageId = null; + FullPageId fullPageId = null; try { for (FullPageId fullId : pages0) { @@ -3001,38 +3034,21 @@ private void finalizeCheckpointOnRecovery( if (writePagesError.get() != null) break; - writePageBuf.rewind(); + // Save pageId to local variable for future using if exception occurred. 
+ fullPageId = fullId; PageMemoryEx pageMem = getPageMemoryForCacheGroup(fullId.groupId()); - // Write page content to writePageBuf. - Integer tag = pageMem.getForCheckpoint(fullId, writePageBuf, null); - - assert tag == null || tag != PageMemoryImpl.TRY_AGAIN_TAG : - "Lock is held by other thread for page " + fullId; - - if (tag != null) { - writePageBuf.rewind(); - - // Save pageId to local variable for future using if exception occurred. - pageId = fullId; - - // Write writePageBuf to page store. - PageStore store = storeMgr.writeInternal( - fullId.groupId(), fullId.pageId(), writePageBuf, tag, true); - - writePageBuf.rewind(); - - // Save store for future fsync. - updStores.add(store); - } + // Write page content to page store via pageStoreWriter. + // Tracker is null, because no need to track checkpoint metrics on recovery. + pageMem.checkpointWritePage(fullId, writePageBuf, pageStoreWriter, null); } // Add number of handled pages. cpPagesCnt.addAndGet(pages0.size()); } catch (IgniteCheckedException e) { - U.error(log, "Failed to write page to pageStore, pageId=" + pageId); + U.error(log, "Failed to write page to pageStore, pageId=" + fullPageId); writePagesError.compareAndSet(null, e); } @@ -4115,21 +4131,22 @@ private Checkpoint markCheckpointBegin(CheckpointMetricsTracker tracker) throws if (printCheckpointStats && log.isInfoEnabled()) { long possibleJvmPauseDur = possibleLongJvmPauseDuration(tracker); - log.info( - String.format( - CHECKPOINT_STARTED_LOG_FORMAT, - cpRec.checkpointId(), - cp.checkpointMark(), - tracker.beforeLockDuration(), - tracker.lockWaitDuration(), - tracker.listenersExecuteDuration(), - tracker.lockHoldDuration(), - tracker.walCpRecordFsyncDuration(), - possibleJvmPauseDur > 0 ? 
"possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", - cpPages.size(), - curr.reason - ) - ); + if (log.isInfoEnabled()) + log.info( + String.format( + CHECKPOINT_STARTED_LOG_FORMAT, + cpRec.checkpointId(), + cp.checkpointMark(), + tracker.beforeLockDuration(), + tracker.lockWaitDuration(), + tracker.listenersExecuteDuration(), + tracker.lockHoldDuration(), + tracker.walCpRecordFsyncDuration(), + possibleJvmPauseDur > 0 ? "possibleJvmPauseDuration=" + possibleJvmPauseDur + "ms," : "", + cpPages.size(), + curr.reason + ) + ); } return new Checkpoint(cp, cpPages, curr); @@ -4706,6 +4723,9 @@ private WriteCheckpointPages( if (pagesToRetry.isEmpty()) doneFut.onDone((Void)null); else { + LT.warn(log, pagesToRetry.size() + " checkpoint pages were not written yet due to unsuccessful " + + "page write lock acquisition and will be retried"); + if (retryWriteExecutor == null) { while (!pagesToRetry.isEmpty()) pagesToRetry = writePages(pagesToRetry); @@ -4737,10 +4757,14 @@ private WriteCheckpointPages( * @return pagesToRetry Pages which should be retried. */ private List writePages(Collection writePageIds) throws IgniteCheckedException { - ByteBuffer tmpWriteBuf = threadBuf.get(); - List pagesToRetry = new ArrayList<>(); + CheckpointMetricsTracker tracker = persStoreMetrics.metricsEnabled() ? this.tracker : null; + + PageStoreWriter pageStoreWriter = createPageStoreWriter(pagesToRetry); + + ByteBuffer tmpWriteBuf = threadBuf.get(); + for (FullPageId fullId : writePageIds) { if (checkpointer.shutdownNow) break; @@ -4770,23 +4794,36 @@ private List writePages(Collection writePageIds) throws pageMem = (PageMemoryEx)metaStorage.pageMemory(); - Integer tag = pageMem.getForCheckpoint( - fullId, tmpWriteBuf, persStoreMetrics.metricsEnabled() ? tracker : null); + pageMem.checkpointWritePage(fullId, tmpWriteBuf, pageStoreWriter, tracker); + } - if (tag != null) { + return pagesToRetry; + } + + /** + * Factory method for create {@link PageStoreWriter}. 
+ * + * @param pagesToRetry List pages for retry. + * @return Checkpoint page write context. + */ + private PageStoreWriter createPageStoreWriter(List pagesToRetry) { + return new PageStoreWriter() { + /** {@inheritDoc} */ + @Override public void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException { if (tag == PageMemoryImpl.TRY_AGAIN_TAG) { - pagesToRetry.add(fullId); + pagesToRetry.add(fullPageId); - continue; + return; } - assert PageIO.getType(tmpWriteBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); - assert PageIO.getVersion(tmpWriteBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); + int groupId = fullPageId.groupId(); + long pageId = fullPageId.pageId(); - tmpWriteBuf.rewind(); + assert getType(buf) != 0 : "Invalid state. Type is 0! pageId = " + hexLong(pageId); + assert getVersion(buf) != 0 : "Invalid state. Version is 0! pageId = " + hexLong(pageId); if (persStoreMetrics.metricsEnabled()) { - int pageType = PageIO.getType(tmpWriteBuf); + int pageType = getType(buf); if (PageIO.isDataPageType(pageType)) tracker.onDataPageWritten(); @@ -4794,13 +4831,11 @@ private List writePages(Collection writePageIds) throws writtenPagesCntr.incrementAndGet(); - PageStore store = storeMgr.writeInternal(grpId, fullId.pageId(), tmpWriteBuf, tag, true); + PageStore store = storeMgr.writeInternal(groupId, pageId, buf, tag, true); updStores.computeIfAbsent(store, k -> new LongAdder()).increment(); } - } - - return pagesToRetry; + }; } } @@ -5381,11 +5416,12 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) GridDhtLocalPartition part = grp.topology().localPartition(p); if (part != null) { - log.info("Partition [grp=" + grp.cacheOrGroupName() - + ", id=" + p - + ", state=" + part.state() - + ", counter=" + part.dataStore().partUpdateCounter() - + ", size=" + part.fullSize() + "]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + 
grp.cacheOrGroupName() + + ", id=" + p + + ", state=" + part.state() + + ", counter=" + part.dataStore().partUpdateCounter() + + ", size=" + part.fullSize() + "]"); continue; } @@ -5396,7 +5432,8 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) pageStore.ensure(grp.groupId(), p); if (pageStore.pages(grp.groupId(), p) <= 1) { - log.info("Partition [grp=" + grp.cacheOrGroupName() + ", id=" + p + ", state=N/A (only file header) ]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + grp.cacheOrGroupName() + ", id=" + p + ", state=N/A (only file header) ]"); continue; } @@ -5417,11 +5454,12 @@ private static void dumpPartitionsInfo(CacheGroupContext grp, IgniteLogger log) long updateCntr = io.getUpdateCounter(pageAddr); long size = io.getSize(pageAddr); - log.info("Partition [grp=" + grp.cacheOrGroupName() - + ", id=" + p - + ", state=" + state - + ", counter=" + updateCntr - + ", size=" + size + "]"); + if (log.isInfoEnabled()) + log.info("Partition [grp=" + grp.cacheOrGroupName() + + ", id=" + p + + ", state=" + state + + ", counter=" + updateCntr + + ", size=" + size + "]"); } finally { pageMem.readUnlock(grp.groupId(), partMetaId, partMetaPage); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java index 5d6b53ae813cf..d016456a49772 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/GridCacheOffheapManager.java @@ -511,6 +511,8 @@ else if (needSnapshot) for (int p = 0; p < grp.affinity().partitions(); p++) { Integer recoverState = partitionRecoveryStates.get(new GroupPartitionId(grp.groupId(), p)); + long startTime = U.currentTimeMillis(); + if (ctx.pageStore().exists(grp.groupId(), 
p)) { ctx.pageStore().ensure(grp.groupId(), p); @@ -555,7 +557,8 @@ else if (needSnapshot) if (log.isDebugEnabled()) log.debug("Restored partition state (from WAL) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + "]"); + ", updCntr=" + part.initialUpdateCounter() + + ", size=" + part.fullSize() + "]"); } else { int stateId = (int) io.getPartitionState(pageAddr); @@ -565,7 +568,8 @@ else if (needSnapshot) if (log.isDebugEnabled()) log.debug("Restored partition state (from page memory) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + ", stateId=" + stateId + "]"); + ", updCntr=" + part.initialUpdateCounter() + ", stateId=" + stateId + + ", size=" + part.fullSize() + "]"); } } finally { @@ -590,13 +594,19 @@ else if (recoverState != null) { // Pre-create partition if having valid state. if (log.isDebugEnabled()) log.debug("Restored partition state (from WAL) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + ", state=" + part.state() + - ", updCntr=" + part.initialUpdateCounter() + "]"); + ", updCntr=" + part.initialUpdateCounter() + + ", size=" + part.fullSize() + "]"); } else { if (log.isDebugEnabled()) log.debug("Skipping partition on recovery (no page store OR wal state) " + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + "]"); } + + if (log.isDebugEnabled()) + log.debug("Finished restoring partition state " + + "[grp=" + grp.cacheOrGroupName() + ", p=" + p + + ", time=" + (U.currentTimeMillis() - startTime) + " ms]"); } partitionStatesRestored = true; @@ -1598,7 +1608,7 @@ public class GridCacheDataStore implements CacheDataStore { /** * @param partId Partition. - * @param exists {@code True} if store for this index exists. + * @param exists {@code True} if store exists. 
*/ private GridCacheDataStore(int partId, boolean exists) { this.partId = partId; @@ -1634,6 +1644,9 @@ private String pendingEntriesTreeName() { } /** + * @param checkExists If {@code true} data store won't be initialized if it doesn't exist + * (has a non-empty data file). This is an optimization for lazy store initialization on writes. + * + * @return Store delegate. + * @throws IgniteCheckedException If failed. + */ @@ -1676,7 +1689,6 @@ private CacheDataStore init0(boolean checkExists) throws IgniteCheckedException freeListName, grp.dataRegion().memoryMetrics(), grp.dataRegion(), - null, ctx.wal(), reuseRoot.pageId().pageId(), reuseRoot.isAllocated(), @@ -2184,11 +2196,9 @@ private Metas getOrAllocatePartitionMetas() throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void updateInitialCounter(long start, long delta) { try { - CacheDataStore delegate0 = init0(true); - - if (delegate0 == null) - throw new IllegalStateException("Should be never called."); + CacheDataStore delegate0 = init0(false); + // Partition may not exist before recovery starts in case of recovering counters from RollbackRecord. 
delegate0.updateInitialCounter(start, delta); } catch (IgniteCheckedException e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java index bd07673221c14..d9e9b5d82e2d8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/IgniteCacheDatabaseSharedManager.java @@ -27,22 +27,24 @@ import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import javax.management.InstanceNotFoundException; - import org.apache.ignite.DataRegionMetrics; +import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.DataStorageMetrics; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; -import org.apache.ignite.DataRegionMetricsProvider; import org.apache.ignite.configuration.DataPageEvictionMode; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.DirectMemoryRegion; +import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; import org.apache.ignite.internal.mem.file.MappedFileMemoryProvider; import org.apache.ignite.internal.mem.unsafe.UnsafeMemoryProvider; import 
org.apache.ignite.internal.pagemem.PageMemory; @@ -59,6 +61,7 @@ import org.apache.ignite.internal.processors.cache.persistence.evict.Random2LruPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.evict.RandomLruPageEvictionTracker; import org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings; +import org.apache.ignite.internal.processors.cache.persistence.freelist.AbstractFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.CacheFreeList; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; @@ -66,6 +69,7 @@ import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; @@ -257,7 +261,6 @@ protected void initPageMemoryDataStructures(DataStorageConfiguration dbCfg) thro freeListName, memMetrics, memPlc, - null, persistenceEnabled ? cctx.wal() : null, 0L, true, @@ -906,9 +909,10 @@ public void beforeExchange(GridDhtPartitionsExchangeFuture discoEvt) throws Igni * Perform memory restore before {@link GridDiscoveryManager} start. * * @param kctx Current kernal context. + * @param startTimer Holder of start time of stages. * @throws IgniteCheckedException If fails. */ - public void startMemoryRestore(GridKernalContext kctx) throws IgniteCheckedException { + public void startMemoryRestore(GridKernalContext kctx, TimeBag startTimer) throws IgniteCheckedException { // No-op. 
} @@ -983,6 +987,68 @@ public void releaseHistoryForPreloading() { // No-op } + /** + * Checks that the given {@code region} has enough space for putting a new entry. + * + * This method makes sense if and only if + * the data region is not persisted {@link DataRegionConfiguration#isPersistenceEnabled()} + * and page eviction is disabled {@link DataPageEvictionMode#DISABLED}. + * + * The non-persistent region should reserve a number of pages to support a free list {@link AbstractFreeList}. + * For example, removing a row from the underlying store may require allocating a new data page + * in order to move a tracked page from one bucket to another one which does not have a free space for a new stripe. + * See {@link AbstractFreeList#removeDataRowByLink}. + * Therefore, inserting a new entry should be prevented in case some threshold is exceeded. + * + * @param region Data region to be checked. + * @throws IgniteOutOfMemoryException In case the given data region does not have enough free space + * for putting a new entry. + */ + public void ensureFreeSpaceForInsert(DataRegion region) throws IgniteOutOfMemoryException { + if (region == null) + return; + + DataRegionConfiguration regCfg = region.config(); + + if (regCfg.getPageEvictionMode() != DataPageEvictionMode.DISABLED || regCfg.isPersistenceEnabled()) + return; + + long memorySize = regCfg.getMaxSize(); + + PageMemory pageMem = region.pageMemory(); + + CacheFreeList freeList = freeListMap.get(regCfg.getName()); + + long nonEmptyPages = (pageMem.loadedPages() - freeList.emptyDataPages()); + + // The maximum number of pages that can be allocated (memorySize / systemPageSize) + // should be greater or equal to the current number of non-empty pages plus + // the number of pages that may be required in order to move all pages to a reuse bucket, + // that is equal to nonEmptyPages * 8 / pageSize, where 8 is the size of a link. 
+ // Note that not the whole page can be used for storing links, + // see PagesListNodeIO and PagesListMetaIO#getCapacity(), so we pessimistically multiply the result by 1.5, + // in any case, the number of required pages is less than 1 percent. + boolean oomThreshold = (memorySize / pageMem.systemPageSize()) < + (nonEmptyPages * (8.0 / pageMem.pageSize() + 1) * 1.5 + 256 /*one page per bucket*/); + + if (oomThreshold) { + IgniteOutOfMemoryException oom = new IgniteOutOfMemoryException("Out of memory in data region [" + + "name=" + regCfg.getName() + + ", initSize=" + U.readableSize(regCfg.getInitialSize(), false) + + ", maxSize=" + U.readableSize(regCfg.getMaxSize(), false) + + ", persistenceEnabled=" + regCfg.isPersistenceEnabled() + "] Try the following:" + U.nl() + + " ^-- Increase maximum off-heap memory size (DataRegionConfiguration.maxSize)" + U.nl() + + " ^-- Enable Ignite persistence (DataRegionConfiguration.persistenceEnabled)" + U.nl() + + " ^-- Enable eviction or expiration policies" + ); + + if (cctx.kernalContext() != null) + cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, oom)); + + throw oom; + } + } + /** * See {@link GridCacheMapEntry#ensureFreeSpace()} * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java new file mode 100644 index 0000000000000..d9f1625026857 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/PageStoreWriter.java @@ -0,0 +1,40 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache.persistence; + +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx; + +/** + * Interface for write page to {@link PageStore}. + */ +public interface PageStoreWriter { + /** + * Callback for write page. {@link PageMemoryEx} will copy page content to buffer before call. + * + * @param fullPageId Page ID to get byte buffer for. The page ID must be present in the collection returned by + * the {@link PageMemoryEx#beginCheckpoint()} method call. + * @param buf Temporary buffer to write changes into. + * @param tag {@code Partition generation} if data was read, {@code null} otherwise (data already saved to storage). + * @throws IgniteCheckedException If write page failed. 
+ */ + void writePage(FullPageId fullPageId, ByteBuffer buf, int tag) throws IgniteCheckedException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java index 4ca6f7f34934a..fde5ca2a946e9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/RowStore.java @@ -25,8 +25,8 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.query.GridQueryRowCacheCleaner; -import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.internal.stat.IoStatisticsHolder; +import org.apache.ignite.internal.util.typedef.internal.U; /** * Data store for H2 rows. @@ -50,6 +50,9 @@ public class RowStore { /** Row cache cleaner. */ private GridQueryRowCacheCleaner rowCacheCleaner; + /** */ + protected final CacheGroupContext grp; + /** * @param grp Cache group. * @param freeList Free list. @@ -58,6 +61,7 @@ public RowStore(CacheGroupContext grp, FreeList freeList) { assert grp != null; assert freeList != null; + this.grp = grp; this.freeList = freeList; ctx = grp.shared(); @@ -96,8 +100,11 @@ public void removeRow(long link, IoStatisticsHolder statHolder) throws IgniteChe * @throws IgniteCheckedException If failed. 
*/ public void addRow(CacheDataRow row, IoStatisticsHolder statHolder) throws IgniteCheckedException { - if (!persistenceEnabled) + if (!persistenceEnabled) { + ctx.database().ensureFreeSpaceForInsert(grp.dataRegion()); + freeList.insertDataRow(row, statHolder); + } else { ctx.database().checkpointReadLock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java index a53d747fcf612..5b302097ca323 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStore.java @@ -24,6 +24,7 @@ import java.nio.channels.ClosedByInterruptException; import java.nio.channels.ClosedChannelException; import java.nio.file.Files; +import java.nio.file.Path; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; @@ -39,6 +40,7 @@ import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; import org.apache.ignite.internal.processors.cache.persistence.wal.crc.IgniteDataIntegrityViolationException; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteOutClosure; import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; @@ -59,7 +61,7 @@ public class FilePageStore implements PageStore { public static final int HEADER_SIZE = 8/*SIGNATURE*/ + 4/*VERSION*/ + 1/*type*/ + 4/*page size*/; /** */ - private final File cfgFile; + private final IgniteOutClosure pathProvider; /** */ private final byte type; @@ -97,17 +99,15 @@ public class FilePageStore implements PageStore { /** */ private final ReadWriteLock lock = new ReentrantReadWriteLock(); - /** - * 
@param file File. - */ + /** */ public FilePageStore( byte type, - File file, + IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, AllocatedPageTracker allocatedTracker) { this.type = type; - this.cfgFile = file; + this.pathProvider = pathProvider; this.dbCfg = cfg; this.ioFactory = factory; this.allocated = new AtomicLong(); @@ -117,7 +117,9 @@ public FilePageStore( /** {@inheritDoc} */ @Override public boolean exists() { - return cfgFile.exists() && cfgFile.length() > headerSize(); + File file = pathProvider.apply().toFile(); + + return file.exists() && file.length() > headerSize(); } /** @@ -174,7 +176,7 @@ private long initFile(FileIO fileIO) throws IOException { } catch (ClosedByInterruptException e) { // If thread was interrupted written header can be inconsistent. - Files.delete(cfgFile.toPath()); + Files.delete(pathProvider.apply()); throw e; } @@ -186,7 +188,7 @@ private long initFile(FileIO fileIO) throws IOException { * @return Next available position in the file to store a data. * @throws IOException If check has failed. */ - private long checkFile(FileIO fileIO) throws IOException { + private long checkFile(FileIO fileIO, File cfgFile) throws IOException { ByteBuffer hdr = ByteBuffer.allocate(headerSize()).order(ByteOrder.LITTLE_ENDIAN); fileIO.readFully(hdr); @@ -246,8 +248,10 @@ public void stop(boolean delete) throws StorageException { if (fileIO != null) // Ensure the file is closed even if not initialized yet. 
fileIO.close(); - if (delete && cfgFile.exists()) - Files.delete(cfgFile.toPath()); + Path path = pathProvider.apply(); + + if (delete && Files.exists(path)) + Files.delete(path); return; } @@ -259,10 +263,10 @@ public void stop(boolean delete) throws StorageException { fileIO = null; if (delete) - Files.delete(cfgFile.toPath()); + Files.delete(pathProvider.apply()); } catch (IOException e) { - throw new StorageException("Failed to stop serving partition file [file=" + cfgFile.getPath() + throw new StorageException("Failed to stop serving partition file [file=" + getFileAbsolutePath() + ", delete=" + delete + "]", e); } finally { @@ -283,6 +287,8 @@ public void stop(boolean delete) throws StorageException { public void truncate(int tag) throws StorageException { init(); + Path filePath = pathProvider.apply(); + lock.writeLock().lock(); try { @@ -294,10 +300,10 @@ public void truncate(int tag) throws StorageException { fileIO = null; - Files.delete(cfgFile.toPath()); + Files.delete(filePath); } catch (IOException e) { - throw new StorageException("Failed to truncate partition file [file=" + cfgFile.getPath() + "]", e); + throw new StorageException("Failed to truncate partition file [file=" + filePath.toAbsolutePath() + "]", e); } finally { allocatedTracker.updateTotalAllocatedPages(-1L * allocated.getAndSet(0) / pageSize); @@ -343,7 +349,7 @@ public void finishRecover() throws StorageException { recover = false; } catch (IOException e) { - throw new StorageException("Failed to finish recover partition file [file=" + cfgFile.getAbsolutePath() + "]", e); + throw new StorageException("Failed to finish recover partition file [file=" + getFileAbsolutePath() + "]", e); } finally { lock.writeLock().unlock(); @@ -362,7 +368,8 @@ public void finishRecover() throws StorageException { assert pageBuf.position() == 0; assert pageBuf.order() == ByteOrder.nativeOrder(); assert off <= allocated.get() : "calculatedOffset=" + off + - ", allocated=" + allocated.get() + ", headerSize=" 
+ headerSize() + ", cfgFile=" + cfgFile; + ", allocated=" + allocated.get() + ", headerSize=" + headerSize() + ", cfgFile=" + + pathProvider.apply().toAbsolutePath(); int n = readWithFailover(pageBuf, off); @@ -385,7 +392,7 @@ public void finishRecover() throws StorageException { if ((savedCrc32 ^ curCrc32) != 0) throw new IgniteDataIntegrityViolationException("Failed to read page (CRC validation failed) " + "[id=" + U.hexLong(pageId) + ", off=" + (off - pageSize) + - ", file=" + cfgFile.getAbsolutePath() + ", fileSize=" + fileIO.size() + + ", file=" + getFileAbsolutePath() + ", fileSize=" + fileIO.size() + ", savedCrc=" + U.hexInt(savedCrc32) + ", curCrc=" + U.hexInt(curCrc32) + ", page=" + U.toHexString(pageBuf) + "]"); @@ -397,7 +404,7 @@ public void finishRecover() throws StorageException { PageIO.setCrc(pageBuf, savedCrc32); } catch (IOException e) { - throw new StorageException("Failed to read page [file=" + cfgFile.getAbsolutePath() + ", pageId=" + pageId + "]", e); + throw new StorageException("Failed to read page [file=" + getFileAbsolutePath() + ", pageId=" + pageId + "]", e); } } @@ -411,7 +418,7 @@ public void finishRecover() throws StorageException { readWithFailover(buf, 0); } catch (IOException e) { - throw new StorageException("Failed to read header [file=" + cfgFile.getAbsolutePath() + "]", e); + throw new StorageException("Failed to read header [file=" + getFileAbsolutePath() + "]", e); } } @@ -435,9 +442,11 @@ private void init() throws StorageException { while (true) { try { + File cfgFile = pathProvider.apply().toFile(); + this.fileIO = fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE); - newSize = (cfgFile.length() == 0 ? initFile(fileIO) : checkFile(fileIO)) - headerSize(); + newSize = (cfgFile.length() == 0 ? 
initFile(fileIO) : checkFile(fileIO, cfgFile)) - headerSize(); if (interrupted) Thread.currentThread().interrupt(); @@ -463,7 +472,7 @@ private void init() throws StorageException { } catch (IOException e) { err = new StorageException( - "Failed to initialize partition file: " + cfgFile.getAbsolutePath(), e); + "Failed to initialize partition file: " + getFileAbsolutePath(), e); throw err; } @@ -509,9 +518,11 @@ private void reinit(FileIO fileIO) throws IOException { try { fileIO = null; + File cfgFile = pathProvider.apply().toFile(); + fileIO = ioFactory.create(cfgFile, CREATE, READ, WRITE); - checkFile(fileIO); + checkFile(fileIO, cfgFile); this.fileIO = fileIO; @@ -564,7 +575,7 @@ private void reinit(FileIO fileIO) throws IOException { assert (off >= 0 && off <= allocated.get()) || recover : "off=" + U.hexLong(off) + ", allocated=" + U.hexLong(allocated.get()) + - ", pageId=" + U.hexLong(pageId) + ", file=" + cfgFile.getPath(); + ", pageId=" + U.hexLong(pageId) + ", file=" + getFileAbsolutePath(); assert pageBuf.capacity() == pageSize; assert pageBuf.position() == 0; @@ -622,7 +633,7 @@ private void reinit(FileIO fileIO) throws IOException { } } - throw new StorageException("Failed to write page [file=" + cfgFile.getAbsolutePath() + throw new StorageException("Failed to write page [file=" + getFileAbsolutePath() + ", pageId=" + pageId + ", tag=" + tag + "]", e); } } @@ -661,7 +672,7 @@ private static int calcCrc32(ByteBuffer pageBuf, int pageSize) { fileIO.force(); } catch (IOException e) { - throw new StorageException("Failed to fsync partition file [file=" + cfgFile.getAbsolutePath() + ']', e); + throw new StorageException("Failed to fsync partition file [file=" + getFileAbsolutePath() + ']', e); } finally { lock.writeLock().unlock(); @@ -684,7 +695,7 @@ private static int calcCrc32(ByteBuffer pageBuf, int pageSize) { * @return File absolute path. 
*/ public String getFileAbsolutePath() { - return cfgFile.getAbsolutePath(); + return pathProvider.apply().toAbsolutePath().toString(); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java index fe93d0743be07..6271b8b638269 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreFactory.java @@ -18,19 +18,39 @@ package org.apache.ignite.internal.processors.cache.persistence.file; import java.io.File; +import java.nio.file.Path; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageIdAllocator; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * */ public interface FilePageStoreFactory { /** - * Creates instance of FilePageStore based on given file. + * Creates instance of PageStore based on given file. * * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA}. * @param file File Page store file. + * @param allocatedTracker metrics updater. + * @return page store + * @throws IgniteCheckedException if failed. */ - public FilePageStore createPageStore(byte type, File file, AllocatedPageTracker allocatedTracker) throws IgniteCheckedException; + default FilePageStore createPageStore(byte type, File file, AllocatedPageTracker allocatedTracker) + throws IgniteCheckedException { + return createPageStore(type, file::toPath, allocatedTracker); + } + + /** + * Creates instance of PageStore based on file path provider. 
+ * + * @param type Data type, can be {@link PageIdAllocator#FLAG_IDX} or {@link PageIdAllocator#FLAG_DATA} + * @param pathProvider File Page store path provider. + * @param allocatedTracker metrics updater + * @return page store + * @throws IgniteCheckedException if failed + */ + FilePageStore createPageStore(byte type, IgniteOutClosure pathProvider, AllocatedPageTracker allocatedTracker) + throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java index dee98da16666e..47cbf355e05dd 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreManager.java @@ -27,8 +27,10 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.file.DirectoryStream; +import java.nio.file.FileSystems; import java.nio.file.Files; import java.nio.file.Path; +import java.nio.file.PathMatcher; import java.nio.file.StandardCopyOption; import java.util.Arrays; import java.util.Collection; @@ -126,6 +128,10 @@ public class FilePageStoreManager extends GridCacheSharedManagerAdapter implemen /** */ public static final String META_STORAGE_NAME = "metastorage"; + /** Matcher for searching of *.tmp files. */ + public static final PathMatcher TMP_FILE_MATCHER = + FileSystems.getDefault().getPathMatcher("glob:**" + TMP_SUFFIX); + /** Marshaller. */ private static final Marshaller marshaller = new JdkMarshaller(); @@ -450,7 +456,7 @@ public FilePageStoreManager(GridKernalContext ctx) { } /** {@inheritDoc} */ - @Override public void onPartitionCreated(int grpId, int partId) throws IgniteCheckedException { + @Override public void onPartitionCreated(int grpId, int partId) { // No-op. 
} @@ -555,7 +561,7 @@ public PageStore writeInternal(int cacheId, long pageId, ByteBuffer pageBuf, int * */ public Path getPath(boolean isSharedGroup, String cacheOrGroupName, int partId) { - return getPartitionFile(cacheWorkDir(isSharedGroup, cacheOrGroupName), partId).toPath(); + return getPartitionFilePath(cacheWorkDir(isSharedGroup, cacheOrGroupName), partId); } /** @@ -617,14 +623,15 @@ private CacheStoreHolder initDir(File cacheWorkDir, FilePageStore[] partStores = new FilePageStore[partitions]; for (int partId = 0; partId < partStores.length; partId++) { - FilePageStore partStore = - pageStoreFactory.createPageStore( - PageMemory.FLAG_DATA, - getPartitionFile(cacheWorkDir, partId), - allocatedTracker); + final int p = partId; - partStores[partId] = partStore; - } + FilePageStore partStore = pageStoreFactory.createPageStore( + PageMemory.FLAG_DATA, + () -> getPartitionFilePath(cacheWorkDir, p), + allocatedTracker); + + partStores[partId] = partStore; + } return new CacheStoreHolder(idxStore, partStores); } @@ -640,8 +647,8 @@ private CacheStoreHolder initDir(File cacheWorkDir, * @param cacheWorkDir Cache work directory. * @param partId Partition id. 
*/ - @NotNull private File getPartitionFile(File cacheWorkDir, int partId) { - return new File(cacheWorkDir, format(PART_FILE_TEMPLATE, partId)); + @NotNull private Path getPartitionFilePath(File cacheWorkDir, int partId) { + return new File(cacheWorkDir, String.format(PART_FILE_TEMPLATE, partId)).toPath(); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java index d8c800d39b9a6..4b0dc198109f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FilePageStoreV2.java @@ -16,9 +16,10 @@ */ package org.apache.ignite.internal.processors.cache.persistence.file; -import java.io.File; +import java.nio.file.Path; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * @@ -31,19 +32,21 @@ public class FilePageStoreV2 extends FilePageStore { private final int hdrSize; /** + * Constructor which initializes file path provider closure, allowing to calculate file path in any time. + * * @param type Type. - * @param file File. + * @param pathProvider file path provider. * @param factory Factory. * @param cfg Config. - * @param allocatedTracker Metrics updater + * @param allocatedTracker Allocated tracker. 
*/ public FilePageStoreV2( byte type, - File file, + IgniteOutClosure pathProvider, FileIOFactory factory, DataStorageConfiguration cfg, AllocatedPageTracker allocatedTracker) { - super(type, file, factory, cfg, allocatedTracker); + super(type, pathProvider, factory, cfg, allocatedTracker); hdrSize = cfg.getPageSize(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java index bc938a57912fc..62266392b5ada 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/file/FileVersionCheckingFactory.java @@ -17,13 +17,15 @@ package org.apache.ignite.internal.processors.cache.persistence.file; -import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.nio.file.Files; +import java.nio.file.Path; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.lang.IgniteOutClosure; /** * Checks version in files if it's present on the disk, creates store with latest version otherwise. 
@@ -73,16 +75,18 @@ public FileVersionCheckingFactory(FileIOFactory fileIOFactory, DataStorageConfig /** {@inheritDoc} */ @Override public FilePageStore createPageStore( byte type, - File file, + IgniteOutClosure pathProvider, AllocatedPageTracker allocatedTracker) throws IgniteCheckedException { - if (!file.exists()) - return createPageStore(type, file, latestVersion(), allocatedTracker); + Path filePath = pathProvider.apply(); - try (FileIO fileIO = fileIOFactoryStoreV1.create(file)) { + if (!Files.exists(filePath)) + return createPageStore(type, pathProvider, latestVersion(), allocatedTracker); + + try (FileIO fileIO = fileIOFactoryStoreV1.create(filePath.toFile())) { int minHdr = FilePageStore.HEADER_SIZE; if (fileIO.size() < minHdr) - return createPageStore(type, file, latestVersion(), allocatedTracker); + return createPageStore(type, pathProvider, latestVersion(), allocatedTracker); ByteBuffer hdr = ByteBuffer.allocate(minHdr).order(ByteOrder.LITTLE_ENDIAN); @@ -94,10 +98,10 @@ public FileVersionCheckingFactory(FileIOFactory fileIOFactory, DataStorageConfig int ver = hdr.getInt(); - return createPageStore(type, file, ver, allocatedTracker); + return createPageStore(type, pathProvider, ver, allocatedTracker); } catch (IOException e) { - throw new IgniteCheckedException("Error while creating file page store [file=" + file + "]:", e); + throw new IgniteCheckedException("Error while creating file page store [file=" + filePath.toAbsolutePath() + "]:", e); } } @@ -120,24 +124,24 @@ public int latestVersion() { * Instantiates specific version of FilePageStore. * * @param type Type. - * @param file File. * @param ver Version. 
* @param allocatedTracker Metrics updater */ public FilePageStore createPageStore( byte type, - File file, + IgniteOutClosure pathProvider, int ver, AllocatedPageTracker allocatedTracker) { + switch (ver) { case FilePageStore.VERSION: - return new FilePageStore(type, file, fileIOFactoryStoreV1, memCfg, allocatedTracker); + return new FilePageStore(type, pathProvider, fileIOFactoryStoreV1, memCfg, allocatedTracker); case FilePageStoreV2.VERSION: - return new FilePageStoreV2(type, file, fileIOFactory, memCfg, allocatedTracker); + return new FilePageStoreV2(type, pathProvider, fileIOFactory, memCfg, allocatedTracker); default: - throw new IllegalArgumentException("Unknown version of file page store: " + ver + " for file [" + file.getAbsolutePath() + "]"); + throw new IllegalArgumentException("Unknown version of file page store: " + ver + " for file [" + pathProvider.apply().toAbsolutePath() + "]"); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java index 755610cff59f6..f6f549563619e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/freelist/CacheFreeList.java @@ -23,7 +23,6 @@ import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; -import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.util.typedef.internal.U; @@ -37,7 +36,6 @@ 
public class CacheFreeList extends AbstractFreeList { * @param name Name. * @param regionMetrics Region metrics. * @param dataRegion Data region. - * @param reuseList Reuse list. * @param wal Wal. * @param metaPageId Meta page id. * @param initNew Initialize new. @@ -47,7 +45,6 @@ public CacheFreeList( String name, DataRegionMetricsImpl regionMetrics, DataRegion dataRegion, - ReuseList reuseList, IgniteWriteAheadLogManager wal, long metaPageId, boolean initNew, @@ -58,7 +55,7 @@ public CacheFreeList( name, regionMetrics, dataRegion, - reuseList, + null, wal, metaPageId, initNew, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java similarity index 90% rename from modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java rename to modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java index b08ddc2f89146..2061b4ad4160a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageWrite.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedDirtyPageStoreWrite.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.util.GridUnsafe; import org.jetbrains.annotations.Nullable; @@ -28,9 +29,9 @@ * content without holding segment lock. Page data is copied into temp buffer during {@link #writePage(FullPageId, * ByteBuffer, int)} and then sent to real implementation by {@link #finishReplacement()}. 
*/ -public class DelayedDirtyPageWrite implements ReplacedPageWriter { +public class DelayedDirtyPageStoreWrite implements PageStoreWriter { /** Real flush dirty page implementation. */ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** Page size. */ private final int pageSize; @@ -56,9 +57,12 @@ public class DelayedDirtyPageWrite implements ReplacedPageWriter { * @param pageSize page size. * @param tracker tracker to lock/unlock page reads. */ - public DelayedDirtyPageWrite(ReplacedPageWriter flushDirtyPage, - ThreadLocal byteBufThreadLoc, int pageSize, - DelayedPageReplacementTracker tracker) { + public DelayedDirtyPageStoreWrite( + PageStoreWriter flushDirtyPage, + ThreadLocal byteBufThreadLoc, + int pageSize, + DelayedPageReplacementTracker tracker + ) { this.flushDirtyPage = flushDirtyPage; this.pageSize = pageSize; this.byteBufThreadLoc = byteBufThreadLoc; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java index aa1b06161c042..83033b2422ab8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/DelayedPageReplacementTracker.java @@ -26,6 +26,7 @@ import org.apache.ignite.IgniteInterruptedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.internal.pagemem.FullPageId; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; /** * Delayed page writes tracker. Provides delayed write implementations and allows to check if page is actually being @@ -36,7 +37,7 @@ public class DelayedPageReplacementTracker { private final int pageSize; /** Flush dirty page real implementation. 
*/ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** Logger. */ private final IgniteLogger log; @@ -57,11 +58,11 @@ public class DelayedPageReplacementTracker { }; /** - * Dirty page write for replacement operations thread local. Because page write {@link DelayedDirtyPageWrite} is + * Dirty page write for replacement operations thread local. Because page write {@link DelayedDirtyPageStoreWrite} is * stateful and not thread safe, this thread local protects from GC pressure on pages replacement.
Map is used * instead of build-in thread local to allow GC to remove delayed writers for alive threads after node stop. */ - private final Map delayedPageWriteThreadLocMap = new ConcurrentHashMap<>(); + private final Map delayedPageWriteThreadLocMap = new ConcurrentHashMap<>(); /** * @param pageSize Page size. @@ -69,8 +70,12 @@ public class DelayedPageReplacementTracker { * @param log Logger. * @param segmentCnt Segments count. */ - public DelayedPageReplacementTracker(int pageSize, ReplacedPageWriter flushDirtyPage, - IgniteLogger log, int segmentCnt) { + public DelayedPageReplacementTracker( + int pageSize, + PageStoreWriter flushDirtyPage, + IgniteLogger log, + int segmentCnt + ) { this.pageSize = pageSize; this.flushDirtyPage = flushDirtyPage; this.log = log; @@ -83,9 +88,9 @@ public DelayedPageReplacementTracker(int pageSize, ReplacedPageWriter flushDirty /** * @return delayed page write implementation, finish method to be called to actually write page. */ - public DelayedDirtyPageWrite delayedPageWrite() { + public DelayedDirtyPageStoreWrite delayedPageWrite() { return delayedPageWriteThreadLocMap.computeIfAbsent(Thread.currentThread().getId(), - id -> new DelayedDirtyPageWrite(flushDirtyPage, byteBufThreadLoc, pageSize, this)); + id -> new DelayedDirtyPageStoreWrite(flushDirtyPage, byteBufThreadLoc, pageSize, this)); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java index 9b0cdb2eb023c..8debe6344a7b2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryEx.java @@ -23,11 +23,11 @@ import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.pagemem.FullPageId; import 
org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.stat.IoStatisticsHolder; import org.apache.ignite.internal.util.GridMultiCollectionWrapper; import org.apache.ignite.lang.IgniteBiTuple; -import org.jetbrains.annotations.Nullable; /** * @@ -127,16 +127,22 @@ public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHldr, public void finishCheckpoint(); /** - * Gets page byte buffer for the checkpoint procedure. + * Prepare page for write during checkpoint. + *{@link PageStoreWriter} will be called when the page will be ready to write. * * @param pageId Page ID to get byte buffer for. The page ID must be present in the collection returned by * the {@link #beginCheckpoint()} method call. - * @param outBuf Temporary buffer to write changes into. + * @param buf Temporary buffer to write changes into. + * @param pageWriter Checkpoint page write context. * @param tracker Checkpoint metrics tracker. - * @return {@code Partition generation} if data was read, {@code null} otherwise (data already saved to storage). - * @throws IgniteException If failed to obtain page data. + * @throws IgniteCheckedException If failed to obtain page data. */ - @Nullable public Integer getForCheckpoint(FullPageId pageId, ByteBuffer outBuf, CheckpointMetricsTracker tracker); + public void checkpointWritePage( + FullPageId pageId, + ByteBuffer buf, + PageStoreWriter pageWriter, + CheckpointMetricsTracker tracker + ) throws IgniteCheckedException; /** * Marks partition as invalid / outdated. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java index 1f8df6fc14859..b2a0f31da5a64 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PageMemoryImpl.java @@ -64,6 +64,7 @@ import org.apache.ignite.internal.processors.cache.persistence.CheckpointLockStateChecker; import org.apache.ignite.internal.processors.cache.persistence.CheckpointWriteProgressSupplier; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; +import org.apache.ignite.internal.processors.cache.persistence.PageStoreWriter; import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListMetaIO; import org.apache.ignite.internal.processors.cache.persistence.partstate.GroupPartitionId; @@ -237,7 +238,7 @@ public class PageMemoryImpl implements PageMemoryEx { private OffheapReadWriteLock rwLock; /** Flush dirty page closure. When possible, will be called by evictPage(). */ - private final ReplacedPageWriter flushDirtyPage; + private final PageStoreWriter flushDirtyPage; /** */ private final AtomicBoolean dirtyUserPagesPresent = new AtomicBoolean(); @@ -290,7 +291,7 @@ public PageMemoryImpl( long[] sizes, GridCacheSharedContext ctx, int pageSize, - ReplacedPageWriter flushDirtyPage, + PageStoreWriter flushDirtyPage, @Nullable GridInClosure3X changeTracker, CheckpointLockStateChecker stateChecker, DataRegionMetricsImpl memMetrics, @@ -484,7 +485,7 @@ else if (throttlingPlc == ThrottlingPolicy.CHECKPOINT_BUFFER_ONLY) // because there is no crc inside them. 
Segment seg = segment(grpId, pageId); - DelayedDirtyPageWrite delayedWriter = delayedPageReplacementTracker != null + DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? delayedPageReplacementTracker.delayedPageWrite() : null; FullPageId fullId = new FullPageId(pageId, grpId); @@ -675,7 +676,7 @@ private DataRegionConfiguration getDataRegionConfiguration() { seg.readLock().unlock(); } - DelayedDirtyPageWrite delayedWriter = delayedPageReplacementTracker != null + DelayedDirtyPageStoreWrite delayedWriter = delayedPageReplacementTracker != null ? delayedPageReplacementTracker.delayedPageWrite() : null; seg.writeLock().lock(); @@ -858,7 +859,7 @@ private long refreshOutdatedPage(Segment seg, int grpId, long pageId, boolean rm // We pinned the page when allocated the temp buffer, release it now. PageHeader.releasePage(absPtr); - checkpointPool.releaseFreePage(tmpBufPtr); + releaseCheckpointBufferPage(tmpBufPtr); } if (rmv) @@ -877,6 +878,14 @@ private long refreshOutdatedPage(Segment seg, int grpId, long pageId, boolean rm return relPtr; } + /** */ + private void releaseCheckpointBufferPage(long tmpBufPtr) { + int resCntr = checkpointPool.releaseFreePage(tmpBufPtr); + + if (resCntr == checkpointBufferPagesSize() / 2 && writeThrottle != null) + writeThrottle.tryWakeupThrottledThreads(); + } + /** * Restores page from WAL page snapshot & delta records. 
* @@ -1076,8 +1085,13 @@ private boolean isThrottlingEnabled() { } /** {@inheritDoc} */ - @Override public Integer getForCheckpoint(FullPageId fullId, ByteBuffer outBuf, CheckpointMetricsTracker tracker) { - assert outBuf.remaining() == pageSize(); + @Override public void checkpointWritePage( + FullPageId fullId, + ByteBuffer buf, + PageStoreWriter pageStoreWriter, + CheckpointMetricsTracker metricsTracker + ) throws IgniteCheckedException { + assert buf.remaining() == pageSize(); Segment seg = segment(fullId.groupId(), fullId.pageId()); @@ -1093,21 +1107,13 @@ private boolean isThrottlingEnabled() { try { if (!isInCheckpoint(fullId)) - return null; - - tag = seg.partGeneration(fullId.groupId(), PageIdUtils.partId(fullId.pageId())); + return; - relPtr = seg.loadedPages.get( - fullId.groupId(), - PageIdUtils.effectivePageId(fullId.pageId()), - tag, - INVALID_REL_PTR, - OUTDATED_REL_PTR - ); + relPtr = resolveRelativePointer(seg, fullId, tag = generationTag(seg, fullId)); // Page may have been cleared during eviction. We have nothing to do in this case. if (relPtr == INVALID_REL_PTR) - return null; + return; if (relPtr != OUTDATED_REL_PTR) { absPtr = seg.absolute(relPtr); @@ -1128,19 +1134,10 @@ private boolean isThrottlingEnabled() { try { // Double-check. - relPtr = seg.loadedPages.get( - fullId.groupId(), - PageIdUtils.effectivePageId(fullId.pageId()), - seg.partGeneration( - fullId.groupId(), - PageIdUtils.partId(fullId.pageId()) - ), - INVALID_REL_PTR, - OUTDATED_REL_PTR - ); + relPtr = resolveRelativePointer(seg, fullId, generationTag(seg, fullId)); if (relPtr == INVALID_REL_PTR) - return null; + return; if (relPtr == OUTDATED_REL_PTR) { relPtr = refreshOutdatedPage( @@ -1153,36 +1150,40 @@ private boolean isThrottlingEnabled() { seg.pool.releaseFreePage(relPtr); } - return null; + return; } finally { seg.writeLock().unlock(); } } - else - return copyPageForCheckpoint(absPtr, fullId, outBuf, pageSingleAcquire, tracker) ? 
tag : TRY_AGAIN_TAG; + + copyPageForCheckpoint(absPtr, fullId, buf, tag, pageSingleAcquire, pageStoreWriter, metricsTracker); } /** * @param absPtr Absolute ptr. * @param fullId Full id. - * @param outBuf Output buffer to write page content into. + * @param buf Buffer for copy page content for future write via {@link PageStoreWriter}. * @param pageSingleAcquire Page is acquired only once. We don't pin the page second time (until page will not be * copied) in case checkpoint temporary buffer is used. - * @param tracker Checkpoint statistics tracker. - * - * @return False if someone else holds lock on page. + * @param pageStoreWriter Checkpoint page write context. */ - private boolean copyPageForCheckpoint( + private void copyPageForCheckpoint( long absPtr, FullPageId fullId, - ByteBuffer outBuf, + ByteBuffer buf, + Integer tag, boolean pageSingleAcquire, + PageStoreWriter pageStoreWriter, CheckpointMetricsTracker tracker - ) { + ) throws IgniteCheckedException { assert absPtr != 0; assert PageHeader.isAcquired(absPtr); + // Exception protection flag. + // No need to write if exception occurred. + boolean canWrite = false; + boolean locked = rwLock.tryWriteLock(absPtr + PAGE_LOCK_OFFSET, OffheapReadWriteLock.TAG_LOCK_ALWAYS); if (!locked) { @@ -1191,7 +1192,11 @@ private boolean copyPageForCheckpoint( if (!pageSingleAcquire) PageHeader.releasePage(absPtr); - return false; + buf.clear(); + + pageStoreWriter.writePage(fullId, buf, TRY_AGAIN_TAG); + + return; } try { @@ -1206,37 +1211,44 @@ private boolean copyPageForCheckpoint( long tmpAbsPtr = checkpointPool.absolute(tmpRelPtr); - copyInBuffer(tmpAbsPtr, outBuf); + copyInBuffer(tmpAbsPtr, buf); GridUnsafe.setMemory(tmpAbsPtr + PAGE_OVERHEAD, pageSize(), (byte)0); if (tracker != null) tracker.onCowPageWritten(); - checkpointPool.releaseFreePage(tmpRelPtr); + releaseCheckpointBufferPage(tmpRelPtr); // Need release again because we pin page when resolve abs pointer, // and page did not have tmp buffer page. 
if (!pageSingleAcquire) PageHeader.releasePage(absPtr); - } else { - copyInBuffer(absPtr, outBuf); + copyInBuffer(absPtr, buf); PageHeader.dirty(absPtr, false); } - assert PageIO.getType(outBuf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); - assert PageIO.getVersion(outBuf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); - - memMetrics.onPageWritten(); + assert PageIO.getType(buf) != 0 : "Invalid state. Type is 0! pageId = " + U.hexLong(fullId.pageId()); + assert PageIO.getVersion(buf) != 0 : "Invalid state. Version is 0! pageId = " + U.hexLong(fullId.pageId()); - return true; + canWrite = true; } finally { rwLock.writeUnlock(absPtr + PAGE_LOCK_OFFSET, OffheapReadWriteLock.TAG_LOCK_ALWAYS); + if (canWrite){ + buf.rewind(); + + pageStoreWriter.writePage(fullId, buf, tag); + + memMetrics.onPageWritten(); + + buf.rewind(); + } + // We pinned the page either when allocated the temp buffer, or when resolved abs pointer. // Must release the page only after write unlock. PageHeader.releasePage(absPtr); @@ -1266,6 +1278,38 @@ private void copyInBuffer(long absPtr, ByteBuffer buf) { } } + /** + * Get current prartition generation tag. + * + * @param seg Segment. + * @param fullId Full page id. + * @return Current partition generation tag. + */ + private int generationTag(Segment seg, FullPageId fullId) { + return seg.partGeneration( + fullId.groupId(), + PageIdUtils.partId(fullId.pageId()) + ); + } + + /** + * Resolver relative pointer via {@link LoadedPagesMap}. + * + * @param seg Segment. + * @param fullId Full page id. + * @param reqVer Required version. + * @return Relative pointer. 
+ */ + private long resolveRelativePointer(Segment seg, FullPageId fullId, int reqVer) { + return seg.loadedPages.get( + fullId.groupId(), + PageIdUtils.effectivePageId(fullId.pageId()), + reqVer, + INVALID_REL_PTR, + OUTDATED_REL_PTR + ); + } + /** {@inheritDoc} */ @Override public int invalidate(int grpId, int partId) { int tag = 0; @@ -1375,6 +1419,30 @@ public long acquiredPages() { return total; } + /** + * @param fullPageId Full page ID to check. + * @return {@code true} if the page is contained in the loaded pages table, {@code false} otherwise. + */ + public boolean hasLoadedPage(FullPageId fullPageId) { + int grpId = fullPageId.groupId(); + long pageId = PageIdUtils.effectivePageId(fullPageId.pageId()); + int partId = PageIdUtils.partId(pageId); + + Segment seg = segment(grpId, pageId); + + seg.readLock().lock(); + + try { + long res = + seg.loadedPages.get(grpId, pageId, seg.partGeneration(grpId, partId), INVALID_REL_PTR, INVALID_REL_PTR); + + return res != INVALID_REL_PTR; + } + finally { + seg.readLock().unlock(); + } + } + /** * @param absPtr Absolute pointer to read lock. * @param fullId Full page ID. 
@@ -1517,38 +1585,58 @@ private void writeUnlockPage( ) { boolean wasDirty = isDirty(page); - //if page is for restore, we shouldn't mark it as changed - if (!restore && markDirty && !wasDirty && changeTracker != null) - changeTracker.apply(page, fullId, this); + try { + //if page is for restore, we shouldn't mark it as changed + if (!restore && markDirty && !wasDirty && changeTracker != null) + changeTracker.apply(page, fullId, this); - boolean pageWalRec = markDirty && walPlc != FALSE && (walPlc == TRUE || !wasDirty); + boolean pageWalRec = markDirty && walPlc != FALSE && (walPlc == TRUE || !wasDirty); - assert GridUnsafe.getInt(page + PAGE_OVERHEAD + 4) == 0; //TODO GG-11480 + assert GridUnsafe.getInt(page + PAGE_OVERHEAD + 4) == 0; //TODO GG-11480 - if (markDirty) - setDirty(fullId, page, markDirty, false); + if (markDirty) + setDirty(fullId, page, markDirty, false); - beforeReleaseWrite(fullId, page + PAGE_OVERHEAD, pageWalRec); + beforeReleaseWrite(fullId, page + PAGE_OVERHEAD, pageWalRec); + } + // Always release the lock. 
+ finally { + long pageId = PageIO.getPageId(page + PAGE_OVERHEAD); - long pageId = PageIO.getPageId(page + PAGE_OVERHEAD); + try { + assert pageId != 0 : U.hexLong(PageHeader.readPageId(page)); - assert pageId != 0 : U.hexLong(PageHeader.readPageId(page)); - assert PageIO.getVersion(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); - assert PageIO.getType(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); + rwLock.writeUnlock(page + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); - try { - rwLock.writeUnlock(page + PAGE_LOCK_OFFSET, PageIdUtils.tag(pageId)); + assert PageIO.getVersion(page + PAGE_OVERHEAD) != 0 : dumpPage(pageId, fullId.groupId()); + assert PageIO.getType(page + PAGE_OVERHEAD) != 0 : U.hexLong(pageId); - if (throttlingPlc != ThrottlingPolicy.DISABLED && !restore && markDirty && !wasDirty) - writeThrottle.onMarkDirty(isInCheckpoint(fullId)); - } - catch (AssertionError ex) { - U.error(log, "Failed to unlock page [fullPageId=" + fullId + ", binPage=" + U.toHexString(page, systemPageSize()) + ']'); + if (throttlingPlc != ThrottlingPolicy.DISABLED && !restore && markDirty && !wasDirty) + writeThrottle.onMarkDirty(isInCheckpoint(fullId)); + } + catch (AssertionError ex) { + U.error(log, "Failed to unlock page [fullPageId=" + fullId + + ", binPage=" + U.toHexString(page, systemPageSize()) + ']'); - throw ex; + throw ex; + } } } + /** + * Prepares page details for assertion. + * @param pageId Page id. + * @param grpId Group id. + */ + @NotNull private String dumpPage(long pageId, int grpId) { + int pageIdx = PageIdUtils.pageIndex(pageId); + int partId = PageIdUtils.partId(pageId); + long off = (long)(pageIdx + 1) * pageSize(); + + return U.hexLong(pageId) + " (grpId=" + grpId + ", pageIdx=" + pageIdx + ", partId=" + partId + ", offH=" + + Long.toHexString(off) + ")"; + } + /** * @param absPtr Absolute pointer to the page. * @return {@code True} if write lock acquired for the page. 
@@ -1824,14 +1912,17 @@ private long allocateFreePage(long pageId) throws GridOffHeapOutOfMemoryExceptio /** * @param relPtr Relative pointer to free. + * @return Resulting number of pages in pool if pages counter is enabled, 0 otherwise. */ - private void releaseFreePage(long relPtr) { + private int releaseFreePage(long relPtr) { long absPtr = absolute(relPtr); assert !PageHeader.isAcquired(absPtr) : "Release pinned page: " + PageHeader.fullPageId(absPtr); + int resCntr = 0; + if (pagesCntr != null) - pagesCntr.getAndDecrement(); + resCntr = pagesCntr.decrementAndGet(); while (true) { long freePageRelPtrMasked = GridUnsafe.getLong(freePageListPtr); @@ -1841,7 +1932,7 @@ private void releaseFreePage(long relPtr) { GridUnsafe.putLong(absPtr, freePageRelPtr); if (GridUnsafe.compareAndSwapLong(null, freePageListPtr, freePageRelPtrMasked, relPtr)) - return; + return resCntr; } } @@ -2062,7 +2153,7 @@ private long borrowOrAllocateFreePage(long pageId) { * @return {@code True} if it is ok to replace this page, {@code false} if another page should be selected. * @throws IgniteCheckedException If failed to write page to the underlying store during eviction. */ - private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private boolean preparePageRemoval(FullPageId fullPageId, long absPtr, PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert writeLock().isHeldByCurrentThread(); // Do not evict cache meta pages. @@ -2155,7 +2246,7 @@ private void clearRowCache(FullPageId fullPageId, long absPtr) throws IgniteChec * @throws IgniteCheckedException If failed to evict page. * @param saveDirtyPage Replaced page writer, implementation to save dirty page to persistent storage. 
*/ - private long removePageForReplacement(ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private long removePageForReplacement(PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert getWriteHoldCount() > 0; if (!pageReplacementWarned) { @@ -2326,7 +2417,7 @@ private boolean isStoreMetadataPage(long absPageAddr) { * @param cap Capacity. * @param saveDirtyPage Evicted page writer. */ - private long tryToFindSequentially(int cap, ReplacedPageWriter saveDirtyPage) throws IgniteCheckedException { + private long tryToFindSequentially(int cap, PageStoreWriter saveDirtyPage) throws IgniteCheckedException { assert getWriteHoldCount() > 0; long prevAddr = INVALID_REL_PTR; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java index 2dd81275d108e..d497bdafa5e4b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteSpeedBasedThrottle.java @@ -288,7 +288,7 @@ private void recurrentLogIfNeed() { if (weight <= WARN_THRESHOLD) return; - if (prevWarnTime.compareAndSet(prevWarningNs, curNs)) { + if (prevWarnTime.compareAndSet(prevWarningNs, curNs) && log.isInfoEnabled()) { String msg = String.format("Throttling is applied to page modifications " + "[percentOfPartTime=%.2f, markDirty=%d pages/sec, checkpointWrite=%d pages/sec, " + "estIdealMarkDirty=%d pages/sec, curDirty=%.2f, maxDirty=%.2f, avgParkTime=%d ns, " + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java index 2828c4348af8d..35a7dab30b580 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottle.java @@ -95,11 +95,8 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, boolean shouldThrottle = false; - if (isPageInCheckpoint) { - int checkpointBufLimit = (int)(pageMemory.checkpointBufferPagesSize() * CP_BUF_FILL_THRESHOLD); - - shouldThrottle = pageMemory.checkpointBufferPagesCount() > checkpointBufLimit; - } + if (isPageInCheckpoint) + shouldThrottle = shouldThrottle(); if (!shouldThrottle && !throttleOnlyPagesInCheckpoint) { AtomicInteger writtenPagesCntr = cpProgress.writtenPagesCounter(); @@ -152,6 +149,16 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, } } + /** {@inheritDoc} */ + @Override public void tryWakeupThrottledThreads() { + if (!shouldThrottle()) { + inCheckpointBackoffCntr.set(0); + + parkThrds.forEach(LockSupport::unpark); + parkThrds.clear(); + } + } + /** {@inheritDoc} */ @Override public void onBeginCheckpoint() { } @@ -162,4 +169,13 @@ public PagesWriteThrottle(PageMemoryImpl pageMemory, notInCheckpointBackoffCntr.set(0); } + + /** + * @return {@code True} if throttling should be enabled, and {@code False} otherwise. 
+ */ + private boolean shouldThrottle() { + int checkpointBufLimit = (int)(pageMemory.checkpointBufferPagesSize() * CP_BUF_FILL_THRESHOLD); + + return pageMemory.checkpointBufferPagesCount() > checkpointBufLimit; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java index e6aab794761eb..a271ed973ae9d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/pagemem/PagesWriteThrottlePolicy.java @@ -37,6 +37,13 @@ public interface PagesWriteThrottlePolicy { */ void onMarkDirty(boolean isPageInCheckpoint); + /** + * Callback to try wakeup throttled threads. + */ + default void tryWakeupThrottledThreads() { + // No-op. + } + /** * Callback to notify throttling policy checkpoint was started. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java index bf322bd485ceb..68797bcfd3148 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/BPlusTree.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache.persistence.tree; import java.io.Externalizable; +import java.sql.SQLException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -31,6 +32,7 @@ import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteInterruptedCheckedException; +import org.apache.ignite.internal.IgniteVersionUtils; import org.apache.ignite.internal.UnregisteredBinaryTypeException; import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.pagemem.PageIdUtils; @@ -42,7 +44,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.InsertRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageAddRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageCutRootRecord; -import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.NewRootInitRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.RemoveRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.ReplaceRecord; @@ -97,6 +99,9 @@ public abstract class BPlusTree extends DataStructure implements /** Wrapper for tree pages operations. Noop by default. Override for test purposes. 
*/ public static volatile PageHandlerWrapper pageHndWrapper = (tree, hnd) -> hnd; + /** Destroy msg. */ + public static final String CONC_DESTROY_MSG = "Tree is being concurrently destroyed: "; + /** */ private static volatile boolean interrupted; @@ -719,9 +724,10 @@ private class InitRoot extends PageHandler { io.initRoot(pageAddr, rootId, pageSize()); io.setInlineSize(pageAddr, inlineSize); + io.initFlagsAndVersion(pageAddr, BPlusMetaIO.FLAGS_DEFAULT, IgniteVersionUtils.VER); if (needWalDeltaRecord(metaId, metaPage, walPlc)) - wal.log(new MetaPageInitRootInlineRecord(cacheId, metaId, rootId, inlineSize)); + wal.log(new MetaPageInitRootInlineFlagsCreatedVersionRecord(cacheId, metaId, rootId, inlineSize)); assert io.getRootLevel(pageAddr) == 0; assert io.getFirstPageId(pageAddr, 0) == rootId; @@ -999,7 +1005,7 @@ private GridCursor findLowerUnbounded(L upper, Object x) throws IgniteChecked */ private void checkDestroyed() { if (destroyed.get()) - throw new IllegalStateException("Tree is being concurrently destroyed: " + getName()); + throw new IllegalStateException(CONC_DESTROY_MSG + getName()); } /** {@inheritDoc} */ @@ -1025,6 +1031,9 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked throw new IgniteCheckedException("Runtime failure on bounds: [lower=" + lower + ", upper=" + upper + "]", e); } catch (RuntimeException | AssertionError e) { + if (e.getCause() instanceof SQLException) + throw e; + long[] pageIds = pages( lower == null || cursor == null || cursor.getCursor == null, () -> new long[]{cursor.getCursor.pageId} @@ -1042,44 +1051,105 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked /** {@inheritDoc} */ @Override public T findFirst() throws IgniteCheckedException { + return findFirst(null); + } + + /** + * Returns a value mapped to the lowest key, or {@code null} if tree is empty or no entry matches the passed filter. + * @param filter Filter closure. + * @return Value. 
+ * @throws IgniteCheckedException If failed. + */ + public T findFirst(TreeRowClosure filter) throws IgniteCheckedException { checkDestroyed(); long curPageId = 0L; long nextPageId = 0L; try { - long firstPageId; - - long metaPage = acquirePage(metaPageId); - try { - firstPageId = getFirstPageId(metaPageId, metaPage, 0); - } - finally { - releasePage(metaPageId, metaPage); - } + for (;;) { - long page = acquirePage(firstPageId); + long metaPage = acquirePage(metaPageId); - try { - long pageAddr = readLock(firstPageId, page); + try { + curPageId = getFirstPageId(metaPageId, metaPage, 0); // Level 0 is always at the bottom. + } + finally { + releasePage(metaPageId, metaPage); + } + long curPage = acquirePage(curPageId); try { - BPlusIO io = io(pageAddr); + long curPageAddr = readLock(curPageId, curPage); - int cnt = io.getCount(pageAddr); + if (curPageAddr == 0) + continue; // The first page has gone: restart scan. - if (cnt == 0) - return null; + try { + BPlusIO io = io(curPageAddr); + + assert io.isLeaf(); + + for (;;) { + int cnt = io.getCount(curPageAddr); - return getRow(io, pageAddr, 0); + for (int i = 0; i < cnt; ++i) { + if (filter == null || filter.apply(this, io, curPageAddr, i)) + return getRow(io, curPageAddr, i); + } + + nextPageId = io.getForward(curPageAddr); + + if (nextPageId == 0) + return null; + + long nextPage = acquirePage(nextPageId); + + try { + long nextPageAddr = readLock(nextPageId, nextPage); + + // In the current implementation the next page can't change when the current page is locked. + assert nextPageAddr != 0 : nextPageAddr; + + try { + long pa = curPageAddr; + curPageAddr = 0; // Set to zero to avoid double unlocking in finalizer. + + readUnlock(curPageId, curPage, pa); + + long p = curPage; + curPage = 0; // Set to zero to avoid double release in finalizer. 
+ + releasePage(curPageId, p); + + curPageId = nextPageId; + curPage = nextPage; + curPageAddr = nextPageAddr; + + nextPage = 0; + nextPageAddr = 0; + } + finally { + if (nextPageAddr != 0) + readUnlock(nextPageId, nextPage, nextPageAddr); + } + } + finally { + if (nextPage != 0) + releasePage(nextPageId, nextPage); + } + } + } + finally { + if (curPageAddr != 0) + readUnlock(curPageId, curPage, curPageAddr); + } } finally { - readUnlock(firstPageId, page, pageAddr); + if (curPage != 0) + releasePage(curPageId, curPage); } } - finally { - releasePage(firstPageId, page); - } } catch (IgniteCheckedException e) { throw new IgniteCheckedException("Runtime failure on first row lookup", e); @@ -1092,6 +1162,7 @@ public final GridCursor find(L lower, L upper, Object x) throws IgniteChecked } } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T findLast() throws IgniteCheckedException { @@ -1409,8 +1480,13 @@ private void validateFirstPages(long metaId, long metaPage, int rootLvl) throws /** * @param msg Message. */ - private static void fail(Object msg) { - throw new AssertionError(msg); + private void fail(Object msg) { + AssertionError err = new AssertionError(msg); + + if (failureProcessor != null) + failureProcessor.process(new FailureContext(FailureType.CRITICAL_ERROR, err)); + + throw err; } /** @@ -4602,7 +4678,7 @@ protected int compare(int lvl, BPlusIO io, long pageAddr, int idx, L row) thr * @return Full detached data row. * @throws IgniteCheckedException If failed. 
*/ - protected final T getRow(BPlusIO io, long pageAddr, int idx) throws IgniteCheckedException { + public final T getRow(BPlusIO io, long pageAddr, int idx) throws IgniteCheckedException { return getRow(io, pageAddr, idx, null); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java index 623951bafc58d..56630e6bb25f2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/BPlusMetaIO.java @@ -19,8 +19,10 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.IgniteVersionUtils; import org.apache.ignite.internal.pagemem.PageUtils; import org.apache.ignite.internal.util.GridStringBuilder; +import org.apache.ignite.lang.IgniteProductVersion; /** * IO routines for B+Tree meta pages. 
@@ -28,17 +30,38 @@ public class BPlusMetaIO extends PageIO { /** */ public static final IOVersions VERSIONS = new IOVersions<>( - new BPlusMetaIO(1), new BPlusMetaIO(2) + new BPlusMetaIO(1), + new BPlusMetaIO(2), + new BPlusMetaIO(3), + new BPlusMetaIO(4) ); /** */ - private static final int LVLS_OFF = COMMON_HEADER_END; + private static final int LVLS_OFFSET = COMMON_HEADER_END; /** */ - private final int refsOff; + private static final int INLINE_SIZE_OFFSET = LVLS_OFFSET + 1; + + /** */ + private static final int FLAGS_OFFSET = INLINE_SIZE_OFFSET + 2; + + /** */ + private static final int CREATED_VER_OFFSET = FLAGS_OFFSET + 8; + + /** */ + private static final int REFS_OFFSET = CREATED_VER_OFFSET + IgniteProductVersion.SIZE_IN_BYTES; + + /** */ + private static final long FLAG_UNWRAPPED_PK = 1L; + + /** */ + private static final long FLAG_INLINE_OBJECT_SUPPORTED = 2L; + + /** FLAG_UNWRAPPED_PK - not set because unwrap PK not supported by 8.5.x versions. */ + public static final long FLAGS_DEFAULT = FLAG_INLINE_OBJECT_SUPPORTED; /** */ - private final int inlineSizeOff; + private final int refsOff; /** * @param ver Page format version. @@ -48,13 +71,19 @@ private BPlusMetaIO(int ver) { switch (ver) { case 1: - inlineSizeOff = -1; - refsOff = LVLS_OFF + 1; + refsOff = LVLS_OFFSET + 1; break; case 2: - inlineSizeOff = LVLS_OFF + 1; - refsOff = inlineSizeOff + 2; + refsOff = INLINE_SIZE_OFFSET + 2; + break; + + case 3: + refsOff = INLINE_SIZE_OFFSET + 2; + break; + + case 4: + refsOff = REFS_OFFSET; break; default: @@ -77,7 +106,7 @@ public void initRoot(long pageAdrr, long rootId, int pageSize) { * @return Number of levels in this tree. 
*/ public int getLevelsCount(long pageAddr) { - return Byte.toUnsignedInt(PageUtils.getByte(pageAddr, LVLS_OFF)); + return Byte.toUnsignedInt(PageUtils.getByte(pageAddr, LVLS_OFFSET)); } /** @@ -97,7 +126,7 @@ private int getMaxLevels(long pageAddr, int pageSize) { private void setLevelsCount(long pageAddr, int lvls, int pageSize) { assert lvls >= 0 && lvls <= getMaxLevels(pageAddr, pageSize) : lvls; - PageUtils.putByte(pageAddr, LVLS_OFF, (byte)lvls); + PageUtils.putByte(pageAddr, LVLS_OFFSET, (byte)lvls); assert getLevelsCount(pageAddr) == lvls; } @@ -172,14 +201,105 @@ public void cutRoot(long pageAddr, int pageSize) { */ public void setInlineSize(long pageAddr, int size) { if (getVersion() > 1) - PageUtils.putShort(pageAddr, inlineSizeOff, (short)size); + PageUtils.putShort(pageAddr, INLINE_SIZE_OFFSET, (short)size); } /** * @param pageAddr Page address. + * @return Inline size. */ public int getInlineSize(long pageAddr) { - return getVersion() > 1 ? PageUtils.getShort(pageAddr, inlineSizeOff) : 0; + return getVersion() > 1 ? PageUtils.getShort(pageAddr, INLINE_SIZE_OFFSET) : 0; + } + + /** + * @param pageAddr Page address. + * @return {@code true} In case use unwrapped PK. + */ + public boolean unwrappedPk(long pageAddr) { + return supportFlags() && (flags(pageAddr) & FLAG_UNWRAPPED_PK) != 0L || getVersion() == 3; + } + + /** + * @param pageAddr Page address. + * @return {@code true} In case inline object is supported by the tree. + */ + public boolean inlineObjectSupported(long pageAddr) { + assert supportFlags(); + + return (flags(pageAddr) & FLAG_INLINE_OBJECT_SUPPORTED) != 0L; + } + + /** + * @return {@code true} If flags are supported. + */ + public boolean supportFlags() { + return getVersion() > 3; + } + + /** + * @param pageAddr Page address. + * @param flags Flags. + * @param createdVer The version of the product that creates the page (b+tree). 
+ */ + public void initFlagsAndVersion(long pageAddr, long flags, IgniteProductVersion createdVer) { + PageUtils.putLong(pageAddr, FLAGS_OFFSET, flags); + + setCreatedVersion(pageAddr, createdVer); + } + + /** + * @param pageAddr Page address. + * @param curVer Ignite current version. + */ + public void setCreatedVersion(long pageAddr, IgniteProductVersion curVer) { + assert curVer != null; + + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET, curVer.major()); + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET + 1, curVer.minor()); + PageUtils.putByte(pageAddr, CREATED_VER_OFFSET + 2, curVer.maintenance()); + PageUtils.putLong(pageAddr, CREATED_VER_OFFSET + 3, curVer.revisionTimestamp()); + PageUtils.putBytes(pageAddr, CREATED_VER_OFFSET + 11, curVer.revisionHash()); + } + + /** + * @param pageAddr Page address. + * @return The version of product that creates the page. + */ + public IgniteProductVersion createdVersion(long pageAddr) { + if (getVersion() < 4) + return null; + + return new IgniteProductVersion( + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET), + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET + 1), + PageUtils.getByte(pageAddr, CREATED_VER_OFFSET + 2), + PageUtils.getLong(pageAddr, CREATED_VER_OFFSET + 3), + PageUtils.getBytes(pageAddr, CREATED_VER_OFFSET + 11, IgniteProductVersion.REV_HASH_SIZE)); + } + + /** + * @param pageAddr Page address. + * @return Long with flags. + */ + private long flags(long pageAddr) { + assert supportFlags(); + + return PageUtils.getLong(pageAddr, FLAGS_OFFSET); + } + + /** + * @param pageAddr Page address. + * @param unwrappedPk unwrapped primary key of this tree flag. + * @param inlineObjSupported inline POJO by created tree flag. + */ + public void setFlags(long pageAddr, boolean unwrappedPk, boolean inlineObjSupported) { + assert supportFlags(); + + long flags = unwrappedPk ? FLAG_UNWRAPPED_PK : 0; + flags |= inlineObjSupported ? 
FLAG_INLINE_OBJECT_SUPPORTED : 0; + + PageUtils.putLong(pageAddr, FLAGS_OFFSET, flags); } /** {@inheritDoc} */ @@ -191,4 +311,34 @@ public int getInlineSize(long pageAddr) { ; //TODO print firstPageIds by level } + + /** + * @param pageAddr Page address. + * @param inlineObjSupported Supports inline object flag. + * @param unwrappedPk Unwrap PK flag. + * @param pageSize Page size. + */ + public static void upgradePageVersion(long pageAddr, boolean inlineObjSupported, boolean unwrappedPk, int pageSize) { + BPlusMetaIO ioPrev = VERSIONS.forPage(pageAddr); + + long[] lvls = new long[ioPrev.getLevelsCount(pageAddr)]; + + for (int i = 0; i < lvls.length; ++i) + lvls[i] = ioPrev.getFirstPageId(pageAddr, i); + + int inlineSize = ioPrev.getInlineSize(pageAddr); + + BPlusMetaIO ioNew = VERSIONS.latest(); + + setVersion(pageAddr, VERSIONS.latest().getVersion()); + + ioNew.setLevelsCount(pageAddr, lvls.length, pageSize); + + for (int i = 0; i < lvls.length; ++i) + ioNew.setFirstPageId(pageAddr, i, lvls[i]); + + ioNew.setInlineSize(pageAddr, inlineSize); + ioNew.setCreatedVersion(pageAddr, IgniteVersionUtils.VER); + ioNew.setFlags(pageAddr, unwrappedPk, inlineObjSupported); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java index e5ada663217cf..d67feace42eee 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/tree/io/PageIO.java @@ -645,17 +645,22 @@ public static boolean isDataPageType(int type) { /** * @param addr Address. 
*/ - public static String printPage(long addr, int pageSize) throws IgniteCheckedException { - PageIO io = getPageIO(addr); - + public static String printPage(long addr, int pageSize) { GridStringBuilder sb = new GridStringBuilder("Header [\n\ttype="); - sb.a(getType(addr)).a(" (").a(io.getClass().getSimpleName()) - .a("),\n\tver=").a(getVersion(addr)).a(",\n\tcrc=").a(getCrc(addr)) - .a(",\n\t").a(PageIdUtils.toDetailString(getPageId(addr))) - .a("\n],\n"); + try { + PageIO io = getPageIO(addr); + + sb.a(getType(addr)).a(" (").a(io.getClass().getSimpleName()) + .a("),\n\tver=").a(getVersion(addr)).a(",\n\tcrc=").a(getCrc(addr)) + .a(",\n\t").a(PageIdUtils.toDetailString(getPageId(addr))) + .a("\n],\n"); - io.printPage(addr, pageSize, sb); + io.printPage(addr, pageSize, sb); + } + catch (IgniteCheckedException e) { + sb.a("Failed to print page: ").a(e.getMessage()); + } return sb.toString(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java index 8a38f28e607ed..298a5b59e09a3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/AbstractWalRecordsIterator.java @@ -60,6 +60,11 @@ public abstract class AbstractWalRecordsIterator */ protected IgniteBiTuple curRec; + /** + * The exception which can be thrown during reading next record. It holds until the next calling of next record. + */ + private IgniteCheckedException curException; + /** * Current WAL segment absolute index.
Determined as lowest number of file at start, is changed during advance * segment @@ -118,9 +123,17 @@ protected AbstractWalRecordsIterator( /** {@inheritDoc} */ @Override protected IgniteBiTuple onNext() throws IgniteCheckedException { + if (curException != null) + throw curException; + IgniteBiTuple ret = curRec; - advance(); + try { + advance(); + } + catch (IgniteCheckedException e) { + curException = e; + } return ret; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java index 8b166f3e9a0c6..b4414e42bf71f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/FileWriteAheadLogManager.java @@ -134,6 +134,9 @@ import static java.nio.file.StandardOpenOption.CREATE; import static java.nio.file.StandardOpenOption.READ; import static java.nio.file.StandardOpenOption.WRITE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_MMAP; import static org.apache.ignite.IgniteSystemProperties.IGNITE_WAL_SEGMENT_SYNC_TIMEOUT; @@ -256,6 +259,12 @@ public class FileWriteAheadLogManager extends GridCacheSharedManagerAdapter impl private final int WAL_COMPRESSOR_WORKER_THREAD_CNT = IgniteSystemProperties.getInteger(IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT, 4); + /** + * Threshold time to print warning to log 
if awaiting for next wal segment took too long (exceeded this threshold). + */ + private static final long THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = + IgniteSystemProperties.getLong(IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT, 1000L); + /** */ private final boolean alwaysWriteFullPages; @@ -1528,9 +1537,24 @@ private File pollNextFile(long curIdx) throws StorageException, IgniteInterrupte return new File(walWorkDir, FileDescriptor.fileName(curIdx + 1)); } + long absNextIdxStartTime = System.nanoTime(); + // Signal to archiver that we are done with the segment and it can be archived. long absNextIdx = archiver0.nextAbsoluteSegmentIndex(); + long absNextIdxWaitTime = U.nanosToMillis(System.nanoTime() - absNextIdxStartTime); + + if (absNextIdxWaitTime > THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT) { + log.warning( + String.format("Waiting for next wal segment was too long " + + "[waitingTime=%s, curIdx=%s, absNextIdx=%s, walSegments=%s]", + absNextIdxWaitTime, + curIdx, + absNextIdx, + dsCfg.getWalSegments()) + ); + } + long segmentIdx = absNextIdx % dsCfg.getWalSegments(); return new File(walWorkDir, FileDescriptor.fileName(segmentIdx)); @@ -2708,7 +2732,7 @@ public void writeHeader() { * * @param ptr Pointer. */ - private void flushOrWait(FileWALPointer ptr) { + private void flushOrWait(FileWALPointer ptr) throws IgniteCheckedException { if (ptr != null) { // If requested obsolete file index, it must be already flushed by close. if (ptr.index() != getSegmentId()) @@ -2721,7 +2745,7 @@ private void flushOrWait(FileWALPointer ptr) { /** * @param ptr Pointer. */ - private void flush(FileWALPointer ptr) { + private void flush(FileWALPointer ptr) throws IgniteCheckedException { if (ptr == null) { // Unconditional flush. walWriter.flushAll(); @@ -3531,21 +3555,21 @@ private void unparkWaiters(long pos) { /** * Forces all made changes to the file. */ - void force() { + void force() throws IgniteCheckedException { flushBuffer(FILE_FORCE); } /** * Closes file. 
*/ - void close() { + void close() throws IgniteCheckedException { flushBuffer(FILE_CLOSE); } /** * Flushes all data from the buffer. */ - void flushAll() { + void flushAll() throws IgniteCheckedException { flushBuffer(UNCONDITIONAL_FLUSH); } @@ -3553,7 +3577,7 @@ void flushAll() { * @param expPos Expected position. */ @SuppressWarnings("ForLoopReplaceableByForEach") - void flushBuffer(long expPos) { + void flushBuffer(long expPos) throws IgniteCheckedException { if (mmap) return; @@ -3579,6 +3603,12 @@ void flushBuffer(long expPos) { if (val == Long.MIN_VALUE) { waiters.remove(t); + Throwable walWriterError = walWriter.err; + + if (walWriterError != null) + throw new IgniteCheckedException("Flush buffer failed.", walWriterError); + + return; } else diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java index d4253f3c2a108..05776cf889996 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/reader/StandaloneGridKernalContext.java @@ -77,7 +77,7 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -445,7 +445,7 @@ 
protected IgniteConfiguration prepareIgniteConfiguration() { } /** {@inheritDoc} */ - @Override public GridSecurityProcessor security() { + @Override public IgniteSecurity security() { return null; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java index 996df70fee1fc..daaa470f35193 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/record/RecordTypes.java @@ -66,5 +66,6 @@ public final class RecordTypes { DELTA_TYPE_SET.add(WALRecord.RecordType.PAGE_LIST_META_RESET_COUNT_RECORD); DELTA_TYPE_SET.add(WALRecord.RecordType.DATA_PAGE_UPDATE_RECORD); DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_INIT_ROOT2); + DELTA_TYPE_SET.add(WALRecord.RecordType.BTREE_META_PAGE_INIT_ROOT_V3); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java index 848acb913d2f1..551e4df97aade 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/scanner/PrintToLogHandler.java @@ -63,7 +63,8 @@ public PrintToLogHandler(IgniteLogger log) { resultString = null; - log.info(msg); + if (log.isInfoEnabled()) + log.info(msg); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java index 34bc50545cbc8..026b717163de3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordDataV1Serializer.java @@ -54,6 +54,7 @@ import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageAddRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageCutRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRecord; +import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineFlagsCreatedVersionRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootInlineRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageInitRootRecord; import org.apache.ignite.internal.pagemem.wal.record.delta.MetaPageUpdateLastAllocatedIndex; @@ -93,6 +94,7 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteProductVersion; /** * Record data V1 serializer. 
@@ -207,6 +209,9 @@ assert record instanceof PageSnapshot; case BTREE_META_PAGE_INIT_ROOT2: return 4 + 8 + 8 + 2; + case BTREE_META_PAGE_INIT_ROOT_V3: + return 4 + 8 + 8 + 2 + 8 + IgniteProductVersion.SIZE_IN_BYTES; + case BTREE_META_PAGE_ADD_ROOT: return 4 + 8 + 8; @@ -533,6 +538,34 @@ assert record instanceof PageSnapshot; break; + case BTREE_META_PAGE_INIT_ROOT_V3: + cacheId = in.readInt(); + pageId = in.readLong(); + + long rootId3 = in.readLong(); + int inlineSize3 = in.readShort(); + + long flags = in.readLong(); + + byte[] revHash = new byte[IgniteProductVersion.REV_HASH_SIZE]; + byte maj = in.readByte(); + byte min = in.readByte(); + byte maint = in.readByte(); + long verTs = in.readLong(); + in.readFully(revHash); + + IgniteProductVersion createdVer = new IgniteProductVersion( + maj, + min, + maint, + verTs, + revHash); + + res = new MetaPageInitRootInlineFlagsCreatedVersionRecord(cacheId, pageId, rootId3, + inlineSize3, flags, createdVer); + + break; + case BTREE_META_PAGE_ADD_ROOT: cacheId = in.readInt(); pageId = in.readLong(); @@ -1045,6 +1078,29 @@ assert record instanceof PageSnapshot; buf.putShort((short)imRec2.inlineSize()); break; + case BTREE_META_PAGE_INIT_ROOT_V3: + MetaPageInitRootInlineFlagsCreatedVersionRecord imRec3 = + (MetaPageInitRootInlineFlagsCreatedVersionRecord)rec; + + buf.putInt(imRec3.groupId()); + buf.putLong(imRec3.pageId()); + + buf.putLong(imRec3.rootId()); + + buf.putShort((short)imRec3.inlineSize()); + + buf.putLong(imRec3.flags()); + + // Write created version. 
+ IgniteProductVersion createdVer = imRec3.createdVersion(); + buf.put(createdVer.major()); + buf.put(createdVer.minor()); + buf.put(createdVer.maintenance()); + buf.putLong(createdVer.revisionTimestamp()); + buf.put(createdVer.revisionHash()); + + break; + case BTREE_META_PAGE_ADD_ROOT: MetaPageAddRootRecord arRec = (MetaPageAddRootRecord)rec; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java index e27faa5f02025..193e492b06d3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/persistence/wal/serializer/RecordV1Serializer.java @@ -56,7 +56,7 @@ * Record V1 serializer. * Stores records in following format: *