diff --git a/.gitignore b/.gitignore index 535a8ff093818..cd52dfa752478 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ git-patch-prop-local.sh **/dotnet/libs/ *.classname* *.exe +.mvn/ #Visual Studio files *.[Oo]bj @@ -93,6 +94,7 @@ packages /modules/platforms/cpp/missing /modules/platforms/cpp/odbc-test/ignite-odbc-tests /modules/platforms/cpp/stamp-h1 +/modules/platforms/cpp/thin-client-test/ignite-thin-client-tests #Files related to ML manual-runnable tests -/modules/ml/src/test/resources/manualrun/trees/columntrees.manualrun.properties \ No newline at end of file +/modules/ml/src/test/resources/manualrun/trees/columntrees.manualrun.properties diff --git a/DEVNOTES.txt b/DEVNOTES.txt index 6ec0f39be6ea9..7785ee780a4b4 100644 --- a/DEVNOTES.txt +++ b/DEVNOTES.txt @@ -75,36 +75,39 @@ Instructions can be found at modules/platforms/cpp/DEVNOTES.txt. Apache Ignite RPM Package Build Instructions ============================================ -1) Install dependencies +1) Build Apache Ignite binary archive (instructions above) - yum install -y rpmdevtools rpm-build +2) Run packaging script and follow messages if they required interaction -2) Create directory layout + packaging/package.sh --rpm - rpmdev-setuptree + Packages will be available in packaging/ directory -3) Copy build specification and additional sources required for build - cp -rfv packaging/rpm/* ~/rpmbuild +Apache Ignite DEB Package Build Instructions +============================================ +1) Build Apache Ignite binary archive (instructions above) -4) Copy built binary from target/bin to sources directory +2) Build Apache Ignite RPM package (also above) - cp -rfv target/bin/apache-ignite--bin.zip ~/rpmbuild/SOURCES/apache-ignite.zip +3) Run packaging script and follow messages if they required interaction -5) Run build process + packaging/package.sh --deb - rpmbuild -bb ~/rpmbuild/SPECS/apache-ignite.spec + Packages will be available in packaging/ directory - NOTE: to build also SRPM 
package, run with -ba flag instead + NOTE: to build both packages type at once, run - Package will be available in ~/rpmbuild/RPMS/noarch + packaging/package.sh --rpm --deb -Install and Run Apache Ignite from RPM -====================================== -1) Install package +Install and Run Apache Ignite from packages +=========================================== +1) Install package with YUM or APT respectively - yum localinstall -y apache-ignite-.rpm + yum localinstall -y apache-ignite-.noarch.rpm + or + dpkg -i apache-ignite__all.deb 2) Start Apache Ignite service diff --git a/NOTICE b/NOTICE index 4c99a05109185..0bae4345e6446 100644 --- a/NOTICE +++ b/NOTICE @@ -1,5 +1,5 @@ Apache Ignite -Copyright 2018 The Apache Software Foundation +Copyright 2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). diff --git a/assembly/NOTICE_HADOOP b/assembly/NOTICE_HADOOP index 4c99a05109185..0bae4345e6446 100644 --- a/assembly/NOTICE_HADOOP +++ b/assembly/NOTICE_HADOOP @@ -1,5 +1,5 @@ Apache Ignite -Copyright 2018 The Apache Software Foundation +Copyright 2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/assembly/release-fabric-base.xml b/assembly/release-fabric-base.xml index b4a58bf32b023..b0c5d116fd270 100644 --- a/assembly/release-fabric-base.xml +++ b/assembly/release-fabric-base.xml @@ -120,6 +120,12 @@ /platforms/cpp/common + + + modules/platforms/cpp/network + /platforms/cpp/network + + modules/platforms/cpp/jni @@ -156,6 +162,12 @@ /platforms/cpp/binary + + + modules/platforms/cpp/thin-client + /platforms/cpp/thin-client + + modules/platforms/licenses @@ -167,18 +179,6 @@ modules/clients/target/cppdoc/html /platforms/cpp/docs - - - - modules/platforms/cpp/bin - /platforms/cpp/bin - - - - - modules/platforms/cpp/bin - /platforms/cpp/bin - diff --git a/assembly/release-scala.xml b/assembly/release-scala.xml index 745641528b460..0164fec3f53e5 100644 --- a/assembly/release-scala.xml +++ b/assembly/release-scala.xml @@ -35,6 +35,7 @@ / **/ignitevisorcmd.bat + **/include/visorcmd/node_startup_by_ssh.sample.ini diff --git a/bin/control.bat b/bin/control.bat index 8a1e1c84e3c70..4894cbc4322ad 100644 --- a/bin/control.bat +++ b/bin/control.bat @@ -104,7 +104,7 @@ if "%OS%" == "Windows_NT" set PROG_NAME=%~nx0% :: call "%SCRIPTS_HOME%\include\setenv.bat" call "%SCRIPTS_HOME%\include\build-classpath.bat" -set CP=%IGNITE_LIBS% +set CP=%IGNITE_LIBS%;%IGNITE_HOME%\libs\optional\ignite-zookeeper\* :: :: Process 'restart'. @@ -156,6 +156,11 @@ if %ERRORLEVEL% equ 0 ( if "%JVM_OPTS%" == "" set JVM_OPTS=-Xms256m -Xmx1g ) +:: +:: Uncomment to enable experimental commands [--wal] +:: +:: set JVM_OPTS=%JVM_OPTS% -DIGNITE_ENABLE_EXPERIMENTAL_COMMAND=true + :: :: Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection. :: diff --git a/bin/control.sh b/bin/control.sh index ad4b14b05151b..7f84696831c50 100755 --- a/bin/control.sh +++ b/bin/control.sh @@ -54,7 +54,7 @@ fi # . "${SCRIPTS_HOME}"/include/setenv.sh . "${SCRIPTS_HOME}"/include/build-classpath.sh # Will be removed in the binary release. 
-CP="${IGNITE_LIBS}" +CP="${IGNITE_LIBS}:${IGNITE_HOME}/libs/optional/ignite-zookeeper/*" RANDOM_NUMBER=$("$JAVA" -cp "${CP}" org.apache.ignite.startup.cmdline.CommandLineRandomNumberGenerator) @@ -92,6 +92,11 @@ if [ -z "$JVM_OPTS" ] ; then fi fi +# +# Uncomment to enable experimental commands [--wal] +# +# JVM_OPTS="${JVM_OPTS} -DIGNITE_ENABLE_EXPERIMENTAL_COMMAND=true" + # # Uncomment the following GC settings if you see spikes in your throughput due to Garbage Collection. # diff --git a/bin/include/visorcmd/node_startup_by_ssh.sample.ini b/bin/include/visorcmd/node_startup_by_ssh.sample.ini new file mode 100644 index 0000000000000..e50ff29930459 --- /dev/null +++ b/bin/include/visorcmd/node_startup_by_ssh.sample.ini @@ -0,0 +1,74 @@ +;Licensed to the Apache Software Foundation (ASF) under one or more +;contributor license agreements. See the NOTICE file distributed with +;this work for additional information regarding copyright ownership. +;The ASF licenses this file to You under the Apache License, Version 2.0 +;(the "License"); you may not use this file except in compliance with +;the License. You may obtain a copy of the License at +; +;http://www.apache.org/licenses/LICENSE-2.0 +; +;Unless required by applicable law or agreed to in writing, software +;distributed under the License is distributed on an "AS IS" BASIS, +;WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +;See the License for the specific language governing permissions and +;limitations under the License. 
+ +# Section with settings for host1: +[host1] +# IP address or host name: +host=192.168.1.1 + +# SSH port: +port=2200 + +# SSH login: +uname=userName + +# SSH password: +passwd=password + +# SSH key path: +key=~/.ssh/id_rsa + +# Number of nodes to start: +nodes=1 + +# Ignite home path: +igniteHome=/usr/lib/ignite + +# Ignite config path: +cfg=examples/example-ignite.xml + +# Ignite node start script: +script=bin/ignite.sh + +# Section with settings for host2: +[host2] +# IP address or host name: +host=192.168.1.2 + +# Section with default settings. Used if value not defined in host section. +[defaults] +# SSH port: +port=22 + +# SSH login: +uname=userName + +# SSH password: +passwd=password + +# SSH key path: +key=~/.ssh/id_rsa + +# Number of nodes to start: +nodes=3 + +# Ignite home path: +igniteHome=/usr/lib/ignite + +# Ignite config path: +cfg=examples/example-ignite.xml + +# Ignite node start script: +script=bin/ignite.sh diff --git a/docker/web-console/standalone/Dockerfile b/docker/web-console/standalone/Dockerfile new file mode 100644 index 0000000000000..bd03c7dee3d6d --- /dev/null +++ b/docker/web-console/standalone/Dockerfile @@ -0,0 +1,84 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +#~~~~~~~~~~~~~~~~~~# +# Frontend build # +#~~~~~~~~~~~~~~~~~~# +FROM node:10-stretch as frontend-build + +ENV NPM_CONFIG_LOGLEVEL error + +WORKDIR /opt/web-console + +# Install node modules and build sources +COPY frontend frontend +RUN cd frontend && \ + npm install --no-optional && \ + npm run build + + +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# +# Web Console Standalone assemble # +#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~# +FROM node:10-stretch + +ENV NPM_CONFIG_LOGLEVEL error + +# Install global node packages +RUN npm install -g pm2 + +# Update software sources and install missing applications +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 9DA31620334BD75D9DCB49F368818C72E52529D4 \ + && echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/4.0 main" | tee /etc/apt/sources.list.d/mongodb-org-4.0.list + apt update && \ + apt install -y --no-install-recommends \ + nginx-light \ + mongodb-org-server \ + dos2unix && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /opt/web-console + +# Install node modules for backend +COPY backend/package*.json backend/ +RUN cd backend && \ + npm install --no-optional --production + +# Copy and build sources +COPY backend backend +RUN cd backend && \ + npm run build + +# Copy Ignite Web Agent module package +COPY ignite-web-agent-*.zip backend/agent_dists + +# Copy previously built frontend +COPY --from=frontend-build /opt/web-console/frontend/build static + +# Copy and fix entrypoint script +COPY docker-entrypoint.sh docker-entrypoint.sh +RUN chmod +x docker-entrypoint.sh \ + && dos2unix docker-entrypoint.sh + +# Copy nginx configuration +COPY nginx/* /etc/nginx/ + +EXPOSE 80 + +ENTRYPOINT ["/opt/web-console/docker-entrypoint.sh"] + diff --git a/examples/pom.xml b/examples/pom.xml index cab1a89bb61e2..8a854cfc1771a 100644 --- a/examples/pom.xml +++ b/examples/pom.xml @@ -27,7 +27,7 @@ ignite-examples - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT @@ -298,4 +298,4 @@ - \ No newline at end of file + 
diff --git a/examples/src/main/java/org/apache/ignite/examples/binary/datagrid/CacheClientBinaryQueryExample.java b/examples/src/main/java/org/apache/ignite/examples/binary/datagrid/CacheClientBinaryQueryExample.java index a3c99962811a9..3bf2482140eec 100644 --- a/examples/src/main/java/org/apache/ignite/examples/binary/datagrid/CacheClientBinaryQueryExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/binary/datagrid/CacheClientBinaryQueryExample.java @@ -19,12 +19,13 @@ import java.sql.Timestamp; import java.util.Arrays; -import java.util.LinkedHashMap; +import java.util.Collections; import java.util.List; import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; +import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheKeyConfiguration; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.QueryEntity; @@ -40,7 +41,6 @@ import org.apache.ignite.examples.model.EmployeeKey; import org.apache.ignite.examples.model.Organization; import org.apache.ignite.examples.model.OrganizationType; -import org.apache.ignite.binary.BinaryObject; /** * This example demonstrates use of binary objects with cache queries. @@ -134,30 +134,21 @@ public static void main(String[] args) { * @return Cache type metadata. 
*/ private static QueryEntity createEmployeeQueryEntity() { - QueryEntity employeeEntity = new QueryEntity(); - - employeeEntity.setValueType(Employee.class.getName()); - employeeEntity.setKeyType(EmployeeKey.class.getName()); - - LinkedHashMap fields = new LinkedHashMap<>(); - - fields.put("name", String.class.getName()); - fields.put("salary", Long.class.getName()); - fields.put("addr.zip", Integer.class.getName()); - fields.put("organizationId", Integer.class.getName()); - fields.put("addr.street", Integer.class.getName()); - - employeeEntity.setFields(fields); - - employeeEntity.setIndexes(Arrays.asList( - new QueryIndex("name"), - new QueryIndex("salary"), - new QueryIndex("addr.zip"), - new QueryIndex("organizationId"), - new QueryIndex("addr.street", QueryIndexType.FULLTEXT) - )); - - return employeeEntity; + return new QueryEntity() + .setValueType(Employee.class.getName()) + .setKeyType(EmployeeKey.class.getName()) + .addQueryField("organizationId", Integer.class.getName(), null) + .addQueryField("name", String.class.getName(), null) + .addQueryField("salary", Long.class.getName(), null) + .addQueryField("addr.zip", Integer.class.getName(), "zip") + .addQueryField("addr.street", String.class.getName(), null) + .setKeyFields(Collections.singleton("organizationId")) + .setIndexes(Arrays.asList( + new QueryIndex("name"), + new QueryIndex("salary"), + new QueryIndex("addr.zip"), + new QueryIndex("organizationId"), + new QueryIndex("addr.street", QueryIndexType.FULLTEXT))); } /** @@ -166,23 +157,15 @@ private static QueryEntity createEmployeeQueryEntity() { * @return Cache type metadata. 
*/ private static QueryEntity createOrganizationQueryEntity() { - QueryEntity organizationEntity = new QueryEntity(); - - organizationEntity.setValueType(Organization.class.getName()); - organizationEntity.setKeyType(Integer.class.getName()); - - LinkedHashMap fields = new LinkedHashMap<>(); - - fields.put("name", String.class.getName()); - fields.put("address.street", String.class.getName()); - - organizationEntity.setFields(fields); - - organizationEntity.setIndexes(Arrays.asList( - new QueryIndex("name") - )); - - return organizationEntity; + return new QueryEntity() + .setValueType(Organization.class.getName()) + .setKeyType(Integer.class.getName()) + .addQueryField("keyId", Integer.class.getName(), null) + .addQueryField("name", String.class.getName(), null) + .addQueryField("address.street", String.class.getName(), null) + .setKeyFieldName("keyId") + .setIndexes(Arrays.asList( + new QueryIndex("name"))); } /** @@ -210,20 +193,19 @@ private static void sqlQuery(IgniteCache cache) { * @param cache Ignite cache. 
*/ private static void sqlJoinQuery(IgniteCache cache) { - SqlQuery qry = new SqlQuery<>(Employee.class, - "from Employee, \"" + ORGANIZATION_CACHE_NAME + "\".Organization as org " + - "where Employee.organizationId = org._key and org.name = ?"); + SqlFieldsQuery qry = new SqlFieldsQuery( + "select e.* from Employee e, \"" + ORGANIZATION_CACHE_NAME + "\".Organization as org " + + "where e.organizationId = org.keyId and org.name = ?"); String organizationName = "GridGain"; - QueryCursor> employees = - cache.query(qry.setArgs(organizationName)); + QueryCursor> employees = cache.query(qry.setArgs(organizationName)); System.out.println(); System.out.println(">>> Employees working for " + organizationName + ':'); - for (Cache.Entry e : employees.getAll()) - System.out.println(">>> " + e.getValue()); + for (List row : employees.getAll()) + System.out.println(">>> " + row); } /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/DatasetWithObviousStructure.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/DatasetWithObviousStructure.java deleted file mode 100644 index 5cd0e099c1a13..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/DatasetWithObviousStructure.java +++ /dev/null @@ -1,105 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.clustering; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Random; -import java.util.stream.Collectors; -import java.util.stream.IntStream; -import org.apache.ignite.ml.math.Matrix; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.VectorUtils; -import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; - -/** - * See KMeansDistributedClustererTestSingleNode#testClusterizationOnDatasetWithObviousStructure. - */ -class DatasetWithObviousStructure { - /** */ - private final Random rnd = new Random(123456L); - - /** Let centers be in the vertices of square. */ - private final Map centers = new HashMap<>(); - - /** Square side length. */ - private final int squareSideLen; - - /** */ - DatasetWithObviousStructure(int squareSideLen) { - this.squareSideLen = squareSideLen; - centers.put(100, new DenseLocalOnHeapVector(new double[] {0.0, 0.0})); - centers.put(900, new DenseLocalOnHeapVector(new double[] {squareSideLen, 0.0})); - centers.put(3000, new DenseLocalOnHeapVector(new double[] {0.0, squareSideLen})); - centers.put(6000, new DenseLocalOnHeapVector(new double[] {squareSideLen, squareSideLen})); - } - - /** */ - List generate(Matrix points) { - int ptsCnt = points.rowSize(); - - // Mass centers of dataset. 
- List massCenters = new ArrayList<>(); - - int centersCnt = centers.size(); - - List permutation = IntStream.range(0, ptsCnt).boxed().collect(Collectors.toList()); - Collections.shuffle(permutation, rnd); - - Vector[] mc = new Vector[centersCnt]; - Arrays.fill(mc, VectorUtils.zeroes(2)); - - int totalCnt = 0; - - int centIdx = 0; - massCenters.clear(); - - for (Integer count : centers.keySet()) { - for (int i = 0; i < count; i++) { - Vector pnt = getPoint(count); - - mc[centIdx] = mc[centIdx].plus(pnt); - - points.assignRow(permutation.get(totalCnt), pnt); - - totalCnt++; - } - massCenters.add(mc[centIdx].times(1 / (double)count)); - centIdx++; - } - - return massCenters; - } - - /** */ - Map centers() { - return centers; - } - - /** */ - private Vector getPoint(Integer cnt) { - Vector pnt = new DenseLocalOnHeapVector(2).assign(centers.get(cnt)); - // Perturbate point on random value. - pnt.map(val -> val + rnd.nextDouble() * squareSideLen / 100); - return pnt; - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansExample.java deleted file mode 100644 index 23aeed7abc2f7..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansExample.java +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.clustering; - -import org.apache.ignite.Ignite; -import org.apache.ignite.Ignition; -import org.apache.ignite.examples.ExampleNodeStartup; -import org.apache.ignite.ml.clustering.BaseFuzzyCMeansClusterer; -import org.apache.ignite.ml.clustering.FuzzyCMeansDistributedClusterer; -import org.apache.ignite.ml.clustering.FuzzyCMeansModel; -import org.apache.ignite.ml.math.StorageConstants; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.distances.DistanceMeasure; -import org.apache.ignite.ml.math.distances.EuclideanDistance; -import org.apache.ignite.ml.math.impls.matrix.SparseDistributedMatrix; -import org.apache.ignite.thread.IgniteThread; - -/** - *

- * This example shows how to use {@link FuzzyCMeansDistributedClusterer}.

- *

- * Remote nodes should always be started with special configuration file which - * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

- *

- * Alternatively you can run {@link ExampleNodeStartup} in another JVM which will start node - * with {@code examples/config/example-ignite.xml} configuration.

- */ -public final class FuzzyCMeansExample { - /** - * Executes example. - * - * @param args Command line arguments, none required. - */ - public static void main(String[] args) throws InterruptedException { - System.out.println(">>> Fuzzy C-Means usage example started."); - - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - - // Start new Ignite thread. - IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - FuzzyCMeansExample.class.getSimpleName(), - () -> { - // Distance measure that computes distance between two points. - DistanceMeasure distanceMeasure = new EuclideanDistance(); - - // "Fuzziness" - specific constant that is used in membership calculation (1.0+-eps ~ K-Means). - double exponentialWeight = 2.0; - - // Condition that indicated when algorithm must stop. - // In this example algorithm stops if memberships have changed insignificantly. - BaseFuzzyCMeansClusterer.StopCondition stopCond = - BaseFuzzyCMeansClusterer.StopCondition.STABLE_MEMBERSHIPS; - - // Maximum difference between new and old membership values with which algorithm will continue to work. - double maxDelta = 0.01; - - // The maximum number of FCM iterations. - int maxIterations = 50; - - // Value that is used to initialize random numbers generator. You can choose it randomly. - Long seed = null; - - // Number of steps of primary centers selection (more steps more candidates). - int initializationSteps = 2; - - // Number of K-Means iteration that is used to choose required number of primary centers from candidates. - int kMeansMaxIterations = 50; - - // Create new distributed clusterer with parameters described above. 
- System.out.println(">>> Create new Distributed Fuzzy C-Means clusterer."); - FuzzyCMeansDistributedClusterer clusterer = new FuzzyCMeansDistributedClusterer( - distanceMeasure, exponentialWeight, stopCond, maxDelta, maxIterations, - seed, initializationSteps, kMeansMaxIterations); - - // Create sample data. - double[][] points = new double[][] { - {-10, -10}, {-9, -11}, {-10, -9}, {-11, -9}, - {10, 10}, {9, 11}, {10, 9}, {11, 9}, - {-10, 10}, {-9, 11}, {-10, 9}, {-11, 9}, - {10, -10}, {9, -11}, {10, -9}, {11, -9}}; - - // Initialize matrix of data points. Each row contains one point. - int rows = points.length; - int cols = points[0].length; - - System.out.println(">>> Create the matrix that contains sample points."); - SparseDistributedMatrix pntMatrix = new SparseDistributedMatrix(rows, cols, - StorageConstants.ROW_STORAGE_MODE, StorageConstants.RANDOM_ACCESS_MODE); - - // Store points into matrix. - pntMatrix.assign(points); - - // Call clusterization method with some number of centers. - // It returns model that can predict results for new points. - System.out.println(">>> Perform clusterization."); - int numCenters = 4; - FuzzyCMeansModel mdl = clusterer.cluster(pntMatrix, numCenters); - - // You can also get centers of clusters that is computed by Fuzzy C-Means algorithm. 
- Vector[] centers = mdl.centers(); - - String res = ">>> Results:\n" - + ">>> 1st center: " + centers[0].get(0) + " " + centers[0].get(1) + "\n" - + ">>> 2nd center: " + centers[1].get(0) + " " + centers[1].get(1) + "\n" - + ">>> 3rd center: " + centers[2].get(0) + " " + centers[2].get(1) + "\n" - + ">>> 4th center: " + centers[3].get(0) + " " + centers[3].get(1) + "\n"; - - System.out.println(res); - - pntMatrix.destroy(); - }); - - igniteThread.start(); - igniteThread.join(); - } - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansLocalExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansLocalExample.java deleted file mode 100644 index 5c1753ad37ffb..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/FuzzyCMeansLocalExample.java +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.examples.ml.clustering; - -import org.apache.ignite.ml.clustering.BaseFuzzyCMeansClusterer; -import org.apache.ignite.ml.clustering.FuzzyCMeansLocalClusterer; -import org.apache.ignite.ml.clustering.FuzzyCMeansModel; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.distances.DistanceMeasure; -import org.apache.ignite.ml.math.distances.EuclideanDistance; -import org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix; - -/** - * This example shows how to use {@link FuzzyCMeansLocalClusterer}. - */ -public final class FuzzyCMeansLocalExample { - /** - * Executes example. - * - * @param args Command line arguments, none required. - */ - public static void main(String[] args) { - System.out.println(">>> Local Fuzzy C-Means usage example started."); - - // Distance measure that computes distance between two points. - DistanceMeasure distanceMeasure = new EuclideanDistance(); - - // "Fuzziness" - specific constant that is used in membership calculation (1.0+-eps ~ K-Means). - double exponentialWeight = 2.0; - - // Condition that indicated when algorithm must stop. - // In this example algorithm stops if memberships have changed insignificantly. - BaseFuzzyCMeansClusterer.StopCondition stopCond = - BaseFuzzyCMeansClusterer.StopCondition.STABLE_MEMBERSHIPS; - - // Maximum difference between new and old membership values with which algorithm will continue to work. - double maxDelta = 0.01; - - // The maximum number of FCM iterations. - int maxIterations = 50; - - // Value that is used to initialize random numbers generator. You can choose it randomly. - Long seed = null; - - // Create new distributed clusterer with parameters described above. - System.out.println(">>> Create new Local Fuzzy C-Means clusterer."); - FuzzyCMeansLocalClusterer clusterer = new FuzzyCMeansLocalClusterer(distanceMeasure, - exponentialWeight, stopCond, - maxDelta, maxIterations, seed); - - // Create sample data. 
- double[][] points = new double[][] { - {-10, -10}, {-9, -11}, {-10, -9}, {-11, -9}, - {10, 10}, {9, 11}, {10, 9}, {11, 9}, - {-10, 10}, {-9, 11}, {-10, 9}, {-11, 9}, - {10, -10}, {9, -11}, {10, -9}, {11, -9}}; - - // Initialize matrix of data points. Each row contains one point. - System.out.println(">>> Create the matrix that contains sample points."); - // Store points into matrix. - DenseLocalOnHeapMatrix pntMatrix = new DenseLocalOnHeapMatrix(points); - - // Call clusterization method with some number of centers. - // It returns model that can predict results for new points. - System.out.println(">>> Perform clusterization."); - int numCenters = 4; - FuzzyCMeansModel mdl = clusterer.cluster(pntMatrix, numCenters); - - // You can also get centers of clusters that is computed by Fuzzy C-Means algorithm. - Vector[] centers = mdl.centers(); - - String res = ">>> Results:\n" - + ">>> 1st center: " + centers[0].get(0) + " " + centers[0].get(1) + "\n" - + ">>> 2nd center: " + centers[1].get(0) + " " + centers[1].get(1) + "\n" - + ">>> 3rd center: " + centers[2].get(0) + " " + centers[2].get(1) + "\n" - + ">>> 4th center: " + centers[3].get(0) + " " + centers[3].get(1) + "\n"; - - System.out.println(res); - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java new file mode 100644 index 0000000000000..8825ebbd4a913 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansClusterizationExample.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.clustering; + +import java.util.Arrays; +import java.util.UUID; +import javax.cache.Cache; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; +import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer; +import org.apache.ignite.ml.math.Tracer; +import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; +import org.apache.ignite.ml.clustering.kmeans.KMeansModel; +import org.apache.ignite.ml.clustering.kmeans.KMeansTrainer; +import org.apache.ignite.thread.IgniteThread; + +/** + * Run kNN multi-class classification trainer over distributed dataset. + * + * @see KNNClassificationTrainer + */ +public class KMeansClusterizationExample { + /** Run example. */ + public static void main(String[] args) throws InterruptedException { + System.out.println(); + System.out.println(">>> KMeans clustering algorithm over cached dataset usage example started."); + // Start ignite grid. 
+ try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + KMeansClusterizationExample.class.getSimpleName(), () -> { + IgniteCache dataCache = getTestCache(ignite); + + KMeansTrainer trainer = new KMeansTrainer() + .withSeed(7867L); + + KMeansModel mdl = trainer.fit( + new CacheBasedDatasetBuilder<>(ignite, dataCache), + (k, v) -> Arrays.copyOfRange(v, 1, v.length), + (k, v) -> v[0] + ); + + System.out.println(">>> KMeans centroids"); + Tracer.showAscii(mdl.centers()[0]); + Tracer.showAscii(mdl.centers()[1]); + System.out.println(">>>"); + + System.out.println(">>> -----------------------------------"); + System.out.println(">>> | Predicted cluster\t| Real Label\t|"); + System.out.println(">>> -----------------------------------"); + + int amountOfErrors = 0; + int totalAmount = 0; + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double prediction = mdl.apply(new DenseLocalOnHeapVector(inputs)); + + totalAmount++; + if (groundTruth != prediction) + amountOfErrors++; + + System.out.printf(">>> | %.4f\t\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + + System.out.println(">>> ---------------------------------"); + + System.out.println("\n>>> Absolute amount of errors " + amountOfErrors); + System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double)totalAmount)); + } + }); + + igniteThread.start(); + igniteThread.join(); + } + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. 
+ */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int i = 0; i < data.length; i++) + cache.put(i, data[i]); + + return cache; + } + + /** The Iris dataset. */ + private static final double[][] data = { + {0, 5.1, 3.5, 1.4, 0.2}, + {0, 4.9, 3, 1.4, 0.2}, + {0, 4.7, 3.2, 1.3, 0.2}, + {0, 4.6, 3.1, 1.5, 0.2}, + {0, 5, 3.6, 1.4, 0.2}, + {0, 5.4, 3.9, 1.7, 0.4}, + {0, 4.6, 3.4, 1.4, 0.3}, + {0, 5, 3.4, 1.5, 0.2}, + {0, 4.4, 2.9, 1.4, 0.2}, + {0, 4.9, 3.1, 1.5, 0.1}, + {0, 5.4, 3.7, 1.5, 0.2}, + {0, 4.8, 3.4, 1.6, 0.2}, + {0, 4.8, 3, 1.4, 0.1}, + {0, 4.3, 3, 1.1, 0.1}, + {0, 5.8, 4, 1.2, 0.2}, + {0, 5.7, 4.4, 1.5, 0.4}, + {0, 5.4, 3.9, 1.3, 0.4}, + {0, 5.1, 3.5, 1.4, 0.3}, + {0, 5.7, 3.8, 1.7, 0.3}, + {0, 5.1, 3.8, 1.5, 0.3}, + {0, 5.4, 3.4, 1.7, 0.2}, + {0, 5.1, 3.7, 1.5, 0.4}, + {0, 4.6, 3.6, 1, 0.2}, + {0, 5.1, 3.3, 1.7, 0.5}, + {0, 4.8, 3.4, 1.9, 0.2}, + {0, 5, 3, 1.6, 0.2}, + {0, 5, 3.4, 1.6, 0.4}, + {0, 5.2, 3.5, 1.5, 0.2}, + {0, 5.2, 3.4, 1.4, 0.2}, + {0, 4.7, 3.2, 1.6, 0.2}, + {0, 4.8, 3.1, 1.6, 0.2}, + {0, 5.4, 3.4, 1.5, 0.4}, + {0, 5.2, 4.1, 1.5, 0.1}, + {0, 5.5, 4.2, 1.4, 0.2}, + {0, 4.9, 3.1, 1.5, 0.1}, + {0, 5, 3.2, 1.2, 0.2}, + {0, 5.5, 3.5, 1.3, 0.2}, + {0, 4.9, 3.1, 1.5, 0.1}, + {0, 4.4, 3, 1.3, 0.2}, + {0, 5.1, 3.4, 1.5, 0.2}, + {0, 5, 3.5, 1.3, 0.3}, + {0, 4.5, 2.3, 1.3, 0.3}, + {0, 4.4, 3.2, 1.3, 0.2}, + {0, 5, 3.5, 1.6, 0.6}, + {0, 5.1, 3.8, 1.9, 0.4}, + {0, 4.8, 3, 1.4, 0.3}, + {0, 5.1, 3.8, 1.6, 0.2}, + {0, 4.6, 3.2, 1.4, 0.2}, + {0, 5.3, 3.7, 1.5, 0.2}, + {0, 5, 3.3, 1.4, 0.2}, + {1, 7, 3.2, 4.7, 1.4}, + {1, 6.4, 3.2, 4.5, 1.5}, + {1, 6.9, 3.1, 4.9, 1.5}, + {1, 5.5, 2.3, 4, 1.3}, + {1, 6.5, 2.8, 4.6, 1.5}, + {1, 5.7, 2.8, 4.5, 1.3}, + {1, 6.3, 3.3, 4.7, 1.6}, + 
{1, 4.9, 2.4, 3.3, 1}, + {1, 6.6, 2.9, 4.6, 1.3}, + {1, 5.2, 2.7, 3.9, 1.4}, + {1, 5, 2, 3.5, 1}, + {1, 5.9, 3, 4.2, 1.5}, + {1, 6, 2.2, 4, 1}, + {1, 6.1, 2.9, 4.7, 1.4}, + {1, 5.6, 2.9, 3.6, 1.3}, + {1, 6.7, 3.1, 4.4, 1.4}, + {1, 5.6, 3, 4.5, 1.5}, + {1, 5.8, 2.7, 4.1, 1}, + {1, 6.2, 2.2, 4.5, 1.5}, + {1, 5.6, 2.5, 3.9, 1.1}, + {1, 5.9, 3.2, 4.8, 1.8}, + {1, 6.1, 2.8, 4, 1.3}, + {1, 6.3, 2.5, 4.9, 1.5}, + {1, 6.1, 2.8, 4.7, 1.2}, + {1, 6.4, 2.9, 4.3, 1.3}, + {1, 6.6, 3, 4.4, 1.4}, + {1, 6.8, 2.8, 4.8, 1.4}, + {1, 6.7, 3, 5, 1.7}, + {1, 6, 2.9, 4.5, 1.5}, + {1, 5.7, 2.6, 3.5, 1}, + {1, 5.5, 2.4, 3.8, 1.1}, + {1, 5.5, 2.4, 3.7, 1}, + {1, 5.8, 2.7, 3.9, 1.2}, + {1, 6, 2.7, 5.1, 1.6}, + {1, 5.4, 3, 4.5, 1.5}, + {1, 6, 3.4, 4.5, 1.6}, + {1, 6.7, 3.1, 4.7, 1.5}, + {1, 6.3, 2.3, 4.4, 1.3}, + {1, 5.6, 3, 4.1, 1.3}, + {1, 5.5, 2.5, 4, 1.3}, + {1, 5.5, 2.6, 4.4, 1.2}, + {1, 6.1, 3, 4.6, 1.4}, + {1, 5.8, 2.6, 4, 1.2}, + {1, 5, 2.3, 3.3, 1}, + {1, 5.6, 2.7, 4.2, 1.3}, + {1, 5.7, 3, 4.2, 1.2}, + {1, 5.7, 2.9, 4.2, 1.3}, + {1, 6.2, 2.9, 4.3, 1.3}, + {1, 5.1, 2.5, 3, 1.1}, + {1, 5.7, 2.8, 4.1, 1.3}, + }; +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansDistributedClustererExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansDistributedClustererExample.java deleted file mode 100644 index f8709e6da45b6..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansDistributedClustererExample.java +++ /dev/null @@ -1,97 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.clustering; - -import java.util.Arrays; -import java.util.List; -import org.apache.ignite.Ignite; -import org.apache.ignite.Ignition; -import org.apache.ignite.examples.ExampleNodeStartup; -import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; -import org.apache.ignite.ml.clustering.KMeansDistributedClusterer; -import org.apache.ignite.ml.math.StorageConstants; -import org.apache.ignite.ml.math.Tracer; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.distances.EuclideanDistance; -import org.apache.ignite.ml.math.impls.matrix.SparseDistributedMatrix; -import org.apache.ignite.thread.IgniteThread; - -/** - *

- * Example of using {@link KMeansDistributedClusterer}.

- *

- * Note that in this example we cannot guarantee order in which nodes return results of intermediate - * computations and therefore algorithm can return different results.

- *

- * Remote nodes should always be started with special configuration file which - * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

- *

- * Alternatively you can run {@link ExampleNodeStartup} in another JVM which will start node - * with {@code examples/config/example-ignite.xml} configuration.

- */ -public class KMeansDistributedClustererExample { - /** - * Executes example. - * - * @param args Command line arguments, none required. - */ - public static void main(String[] args) throws InterruptedException { - // IMPL NOTE based on KMeansDistributedClustererTestSingleNode#testClusterizationOnDatasetWithObviousStructure - System.out.println(">>> K-means distributed clusterer example started."); - - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - - // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread - // because we create ignite cache internally. - IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - SparseDistributedMatrixExample.class.getSimpleName(), () -> { - - int ptsCnt = 10000; - - SparseDistributedMatrix points = new SparseDistributedMatrix(ptsCnt, 2, - StorageConstants.ROW_STORAGE_MODE, StorageConstants.RANDOM_ACCESS_MODE); - - DatasetWithObviousStructure dataset = new DatasetWithObviousStructure(10000); - - List massCenters = dataset.generate(points); - - EuclideanDistance dist = new EuclideanDistance(); - - KMeansDistributedClusterer clusterer = new KMeansDistributedClusterer(dist, 3, 100, 1L); - - Vector[] resCenters = clusterer.cluster(points, 4).centers(); - - System.out.println("Mass centers:"); - massCenters.forEach(Tracer::showAscii); - - System.out.println("Cluster centers:"); - Arrays.asList(resCenters).forEach(Tracer::showAscii); - - points.destroy(); - - System.out.println("\n>>> K-means distributed clusterer example completed."); - }); - - igniteThread.start(); - - igniteThread.join(); - } - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansLocalClustererExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansLocalClustererExample.java deleted file mode 100644 index 
28ca9d940d029..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/clustering/KMeansLocalClustererExample.java +++ /dev/null @@ -1,106 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.clustering; - -import java.util.Arrays; -import java.util.Comparator; -import java.util.List; -import org.apache.ignite.ml.clustering.KMeansLocalClusterer; -import org.apache.ignite.ml.clustering.KMeansModel; -import org.apache.ignite.ml.math.Tracer; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.distances.DistanceMeasure; -import org.apache.ignite.ml.math.distances.EuclideanDistance; -import org.apache.ignite.ml.math.functions.Functions; -import org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix; - -/** - * Example of using {@link KMeansLocalClusterer}. - */ -public class KMeansLocalClustererExample { - /** - * Executes example. - * - * @param args Command line arguments, none required. 
- */ - public static void main(String[] args) { - // IMPL NOTE based on KMeansDistributedClustererTestSingleNode#testClusterizationOnDatasetWithObviousStructure - System.out.println(">>> K-means local clusterer example started."); - - int ptsCnt = 10000; - DenseLocalOnHeapMatrix points = new DenseLocalOnHeapMatrix(ptsCnt, 2); - - DatasetWithObviousStructure dataset = new DatasetWithObviousStructure(10000); - - List massCenters = dataset.generate(points); - - EuclideanDistance dist = new EuclideanDistance(); - OrderedNodesComparator comp = new OrderedNodesComparator( - dataset.centers().values().toArray(new Vector[] {}), dist); - - massCenters.sort(comp); - - KMeansLocalClusterer clusterer = new KMeansLocalClusterer(dist, 100, 1L); - - KMeansModel mdl = clusterer.cluster(points, 4); - Vector[] resCenters = mdl.centers(); - Arrays.sort(resCenters, comp); - - System.out.println("Mass centers:"); - massCenters.forEach(Tracer::showAscii); - - System.out.println("Cluster centers:"); - Arrays.asList(resCenters).forEach(Tracer::showAscii); - - System.out.println("\n>>> K-means local clusterer example completed."); - } - - /** */ - private static class OrderedNodesComparator implements Comparator { - /** */ - private final DistanceMeasure measure; - - /** */ - List orderedNodes; - - /** */ - OrderedNodesComparator(Vector[] orderedNodes, DistanceMeasure measure) { - this.orderedNodes = Arrays.asList(orderedNodes); - this.measure = measure; - } - - /** */ - private int findClosestNodeIndex(Vector v) { - return Functions.argmin(orderedNodes, v1 -> measure.compute(v1, v)).get1(); - } - - /** */ - @Override public int compare(Vector v1, Vector v2) { - int ind1 = findClosestNodeIndex(v1); - int ind2 = findClosestNodeIndex(v2); - - int signum = (int)Math.signum(ind1 - ind2); - - if (signum != 0) - return signum; - - return (int)Math.signum(orderedNodes.get(ind1).minus(v1).kNorm(2) - - orderedNodes.get(ind2).minus(v2).kNorm(2)); - } - } -} diff --git 
a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java index 90f6e8fedb52d..4944a6b523dd2 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/Coin.java @@ -23,7 +23,7 @@ * POJO to model a coin */ public class Coin implements Serializable { - + /** Define Coin Type */ public static enum CoinType { PENNY, QUARTER, NICKEL, DIME } @@ -33,6 +33,7 @@ public static enum CoinType { /** number of coins */ private int numberOfCoins = 0; + /** CoinType */ private CoinType coinType = null; /** @@ -80,8 +81,8 @@ public void setCoinType(CoinType coinType) { this.coinType = coinType; } - @Override - public String toString() { + /** {@inheritDoc} */ + @Override public String toString() { return "Coin [numberOfCoins=" + numberOfCoins + ", coinType=" + coinType + "]"; } diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java index faa113b657f1d..a7c248ad53745 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeFitnessFunction.java @@ -31,9 +31,8 @@ * * an individual solution relative to other solutions.
*/ - public class OptimizeMakeChangeFitnessFunction implements IFitnessFunction { - + /** target amount */ int targetAmount = 0; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java index 000424f003e32..2051946ce1e21 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeGAExample.java @@ -19,11 +19,9 @@ import java.util.ArrayList; import java.util.List; - import org.apache.ignite.Ignite; import org.apache.ignite.IgniteLogger; import org.apache.ignite.Ignition; - import org.apache.ignite.ml.genetic.Chromosome; import org.apache.ignite.ml.genetic.GAGrid; import org.apache.ignite.ml.genetic.Gene; @@ -43,19 +41,21 @@ * * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.change.OptimizeMakeChangeGAExample" * -DAMOUNTCHANGE=75 - * - *

Remote nodes should always be started with special configuration file which enables P2P class loading: {@code - * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

Alternatively you can run ExampleNodeStartup in - * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.

*/ - public class OptimizeMakeChangeGAExample { + /** Ignite instance */ private static Ignite ignite = null; + + /** GAGrid */ private static GAGrid gaGrid = null; + + /** GAConfiguration */ private static GAConfiguration gaConfig = null; + /** amount of change */ private static String sAmountChange = null; + /** Ignite logger */ private static IgniteLogger logger = null; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java index 79601fe09052c..ab3f462aae768 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/change/OptimizeMakeChangeTerminateCriteria.java @@ -31,8 +31,9 @@ * Terminate Condition implementation for OptimizeMakeChangeGATest
*/ public class OptimizeMakeChangeTerminateCriteria implements ITerminateCriteria { - + /** Ignite logger */ private IgniteLogger igniteLogger = null; + /** Ignite instance */ private Ignite ignite = null; /** @@ -57,7 +58,7 @@ public boolean isTerminationConditionMet(Chromosome fittestChromosome, double av igniteLogger.info("Generation: " + currentGeneration); igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome); igniteLogger.info("Chromsome: " + fittestChromosome); - printCoins(GAGridUtils.getGenesForChromosome(ignite, fittestChromosome)); + printCoins(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome)); igniteLogger.info("Avg Chromsome Fitness: " + averageFitnessScore); igniteLogger.info("##########################################################################################"); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java index f5f1ee3081ba5..85c00e902963f 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldFitnessFunction.java @@ -39,9 +39,8 @@ * * we achieve a fitness score of '11', as 'HELLO WORLD' contains '11' characters. 
*/ - public class HelloWorldFitnessFunction implements IFitnessFunction { - + /** Optimal target solution */ private String targetString = "HELLO WORLD"; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java index 70b00d80db05f..4b4fb1c7fb355 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldGAExample.java @@ -19,10 +19,8 @@ import java.util.ArrayList; import java.util.List; - import org.apache.ignite.Ignite; import org.apache.ignite.Ignition; - import org.apache.ignite.ml.genetic.Chromosome; import org.apache.ignite.ml.genetic.GAGrid; import org.apache.ignite.ml.genetic.Gene; @@ -39,17 +37,20 @@ * How To Run: * * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.helloworld.HelloWorldGAExample" - * - *

Remote nodes should always be started with special configuration file which enables P2P class loading: {@code - * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

Alternatively you can run ExampleNodeStartup in - * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.

*/ - public class HelloWorldGAExample { + /** Ignite instance */ private static Ignite ignite = null; + /** GAGrid */ private static GAGrid gaGrid = null; + /** GAConfiguration */ private static GAConfiguration gaConfig = null; + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ public static void main(String args[]) { System.setProperty("IGNITE_QUIET", "false"); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java index a0d6f9394634b..24bb5e0365e08 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/helloworld/HelloWorldTerminateCriteria.java @@ -33,8 +33,9 @@ * Class terminates Genetic algorithm when fitnessScore > 10 */ public class HelloWorldTerminateCriteria implements ITerminateCriteria { - + /** Ignite logger */ private IgniteLogger igniteLogger = null; + /** Ignite instance */ private Ignite ignite = null; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java new file mode 100644 index 0000000000000..f64cb173bb091 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/Item.java @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.genetic.knapsack; + +import java.io.Serializable; + +/** + * POJO to model an Item + */ +public class Item implements Serializable { + /** weight of item in lbs. */ + private double weight; + /** value of item */ + private double value; + /** name of item */ + private String name; + + /** + * Get the weight + * + * @return Weight + */ + public double getWeight() { + return weight; + } + + /** + * Set the weight + * + * @param weight Weight + */ + public void setWeight(double weight) { + this.weight = weight; + } + + /** + * Get the value + * + * @return Value + */ + public double getValue() { + return value; + } + + /** + * @param value Value + */ + public void setValue(double value) { + this.value = value; + } + + /** + * Get the name + * + * @return Name + */ + public String getName() { + return name; + } + + /** + * Set the name + * + * @param name Name + */ + public void setName(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return "Item [weight=" + weight + ", value=" + value + ", name=" + name + "]"; + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java new file mode 100644 index 0000000000000..5ccaa8b5b93aa --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackFitnessFunction.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache 
Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.genetic.knapsack; + +import java.util.ArrayList; +import java.util.List; +import org.apache.ignite.ml.genetic.Gene; +import org.apache.ignite.ml.genetic.IFitnessFunction; + +/** + * This example demonstrates how to create a IFitnessFunction + * + * Your IFitnessFunction will vary depending on your particular use case. + * + * For this fitness function, we simply want to calculate the weight and value of + * + * an individual solution relative to other solutions. + * + * + * To do this, we total the weights and values of all the genes within a chromosome. 
+ */ +public class KnapsackFitnessFunction implements IFitnessFunction { + /** weight capacity of knapsack */ + private double maximumWeight = 20; + + /** + * Calculate fitness + * + * @param genes List of Genes + * @return Fitness value + */ + public double evaluate(List genes) { + + double value = 0; + double weight = 0; + + List dups = new ArrayList(); + int badSolution = 1; + + for (Gene agene : genes) { + weight = weight + ((Item)(agene.getValue())).getWeight(); + value = value + ((Item)(agene.getValue())).getValue(); + + if (dups.contains(agene.id()) || (weight > maximumWeight)) { + badSolution = 0; + break; + } + else + dups.add(agene.id()); + } + + return (value * badSolution); + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java new file mode 100644 index 0000000000000..fa2ae68eafcf0 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackGAExample.java @@ -0,0 +1,319 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.genetic.knapsack; + +import java.util.ArrayList; +import java.util.List; +import org.apache.ignite.Ignite; +import org.apache.ignite.Ignition; +import org.apache.ignite.ml.genetic.GAGrid; +import org.apache.ignite.ml.genetic.Gene; +import org.apache.ignite.ml.genetic.parameter.GAConfiguration; + +/** + * This example demonstrates how to use the GAGrid framework. + * + * Example demonstrates Knapsack Problem: Given a set of 30 items, each with a weight and a value, pack 10 items in + * knapsack so that the total weight is less <= 20 lbs. and the total value is maximized. + * + * + * How To Run: + * + * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.knapsack.KnapsackGAExample" + */ +public class KnapsackGAExample { + /** Ignite instance */ + private static Ignite ignite = null; + /** GAGrid */ + private static GAGrid gaGrid = null; + /** GAConfiguration */ + private static GAConfiguration gaConfig = null; + + /** + * @param args Command line arguments, none required. + */ + public static void main(String args[]) { + System.setProperty("IGNITE_QUIET", "false"); + + try { + + //Create an Ignite instance as you would in any other use case. + ignite = Ignition.start("examples/config/example-ignite.xml"); + + // Create GAConfiguration + gaConfig = new GAConfiguration(); + + // set Gene Pool + List genes = getGenePool(); + + // set the Chromosome Length to '10' since our knapsack may contain a total of 10 items. 
+ gaConfig.setChromosomeLength(10); + + // initialize gene pool + gaConfig.setGenePool(genes); + + // create and set Fitness function + KnapsackFitnessFunction function = new KnapsackFitnessFunction(); + gaConfig.setFitnessFunction(function); + + // create and set TerminateCriteria + KnapsackTerminateCriteria termCriteria = new KnapsackTerminateCriteria(ignite); + gaConfig.setTerminateCriteria(termCriteria); + + ignite.log(); + + gaGrid = new GAGrid(gaConfig, ignite); + // evolve the population + gaGrid.evolve(); + + Ignition.stop(true); + + ignite = null; + + } + catch (Exception e) { + System.out.println(e); + } + + } + + /** + * Helper routine to initialize Gene pool + * + * In typical usecase genes may be stored in database. + * + * @return List + */ + private static List getGenePool() { + List list = new ArrayList(); + + Item item1 = new Item(); + item1.setName("Swiss Army Knife"); + item1.setWeight(0.08125); + item1.setValue(15); + Gene gene1 = new Gene(item1); + + Item item2 = new Item(); + item2.setName("Duct Tape"); + item2.setWeight(1.3); + item2.setValue(3); + Gene gene2 = new Gene(item2); + + Item item3 = new Item(); + item3.setName("Rope (50 feet)"); + item3.setWeight(7); + item3.setValue(10); + Gene gene3 = new Gene(item3); + + Item item4 = new Item(); + item4.setName("Satellite phone"); + item4.setWeight(2); + item4.setValue(8); + Gene gene4 = new Gene(item4); + + Item item5 = new Item(); + item5.setName("Elmer's Glue"); + item5.setWeight(0.25); + item5.setValue(2); + Gene gene5 = new Gene(item5); + + Item item6 = new Item(); + item6.setName("Toilet Paper Roll"); + item6.setWeight(.5); + item6.setValue(4); + Gene gene6 = new Gene(item6); + + Item item7 = new Item(); + item7.setName("Binoculars"); + item7.setWeight(3); + item7.setValue(5); + Gene gene7 = new Gene(item7); + + Item item8 = new Item(); + item8.setName("Compass"); + item8.setWeight(0.0573202); + item8.setValue(15); + Gene gene8 = new Gene(item8); + + Item item9 = new Item(); + 
item9.setName("Jug (prefilled with water)"); + item9.setWeight(4); + item9.setValue(6); + Gene gene9 = new Gene(item9); + + Item item10 = new Item(); + item10.setName("Flashlight"); + item10.setWeight(2); + item10.setValue(4); + Gene gene10 = new Gene(item10); + + Item item11 = new Item(); + item11.setName("Box of paper clips"); + item11.setWeight(.9); + item11.setValue(2); + Gene gene11 = new Gene(item11); + + Item item12 = new Item(); + item12.setName("Gloves (1 pair)"); + item12.setWeight(.8125); + item12.setValue(3); + Gene gene12 = new Gene(item12); + + Item item13 = new Item(); + item13.setName("Scissors"); + item13.setWeight(0.2); + item13.setValue(2); + Gene gene13 = new Gene(item13); + + Item item14 = new Item(); + item14.setName("Signal Flair (4pk)"); + item14.setWeight(4); + item14.setValue(5); + Gene gene14 = new Gene(item14); + + Item item15 = new Item(); + item15.setName("Water Purifying System"); + item15.setWeight(0.5125); + item15.setValue(4); + Gene gene15 = new Gene(item15); + + Item item16 = new Item(); + item16.setName("Whistle"); + item16.setWeight(0.075); + item16.setValue(2); + Gene gene16 = new Gene(item16); + + Item item17 = new Item(); + item17.setName("Sleeping Bag"); + item17.setWeight(0.38125); + item17.setValue(4); + Gene gene17 = new Gene(item17); + + Item item18 = new Item(); + item18.setName("Insect Repellent"); + item18.setWeight(1.15); + item18.setValue(3); + Gene gene18 = new Gene(item18); + + Item item19 = new Item(); + item19.setName("Trowel"); + item19.setWeight(0.31875); + item19.setValue(3); + Gene gene19 = new Gene(item19); + + Item item20 = new Item(); + item20.setName("Lighter"); + item20.setWeight(.2); + item20.setValue(4); + Gene gene20 = new Gene(item20); + + Item item21 = new Item(); + item21.setName("Safety Horn"); + item21.setWeight(.21); + item21.setValue(3); + Gene gene21 = new Gene(item21); + + Item item22 = new Item(); + item22.setName("Headlamp"); + item22.setWeight(.8); + item22.setValue(4); + Gene gene22 = 
new Gene(item22); + + Item item23 = new Item(); + item23.setName("Freeze Dried Food Kit"); + item23.setWeight(2); + item23.setValue(6); + Gene gene23 = new Gene(item23); + + Item item24 = new Item(); + item24.setName("Sunscreen"); + item24.setWeight(.5); + item24.setValue(4); + Gene gene24 = new Gene(item24); + + Item item25 = new Item(); + item25.setName("Trekking Pole (Adjustable)"); + item25.setWeight(1.3); + item25.setValue(4); + Gene gene25 = new Gene(item25); + + Item item26 = new Item(); + item26.setName("Counter Assault Bear Spray"); + item26.setWeight(.5); + item26.setValue(4); + Gene gene26 = new Gene(item26); + + Item item27 = new Item(); + item27.setName("Insect Spray"); + item27.setWeight(.5); + item27.setValue(3); + Gene gene27 = new Gene(item27); + + Item item28 = new Item(); + item28.setName("Hand sanitizer"); + item28.setWeight(.625); + item28.setValue(3); + Gene gene28 = new Gene(item28); + + Item item29 = new Item(); + item29.setName("Mirror"); + item29.setWeight(.5); + item29.setValue(3); + Gene gene29 = new Gene(item29); + + Item item30 = new Item(); + item30.setName("First Aid Kit"); + item30.setWeight(3); + item30.setValue(6); + Gene gene30 = new Gene(item30); + + list.add(gene1); + list.add(gene2); + list.add(gene3); + list.add(gene4); + list.add(gene5); + list.add(gene6); + list.add(gene7); + list.add(gene8); + list.add(gene9); + list.add(gene10); + list.add(gene11); + list.add(gene12); + list.add(gene13); + list.add(gene14); + list.add(gene15); + list.add(gene16); + list.add(gene17); + list.add(gene18); + list.add(gene19); + list.add(gene20); + list.add(gene21); + list.add(gene22); + list.add(gene23); + list.add(gene24); + list.add(gene25); + list.add(gene26); + list.add(gene27); + list.add(gene28); + list.add(gene29); + list.add(gene30); + + return list; + } + +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java 
b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java new file mode 100644 index 0000000000000..a10a6fc12754a --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/knapsack/KnapsackTerminateCriteria.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.genetic.knapsack; + +import java.util.List; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.ml.genetic.Chromosome; +import org.apache.ignite.ml.genetic.Gene; +import org.apache.ignite.ml.genetic.parameter.ITerminateCriteria; +import org.apache.ignite.ml.genetic.utils.GAGridUtils; + +/** + * Represents the terminate condition for Knapsack Genetic algorithm + * + * Class terminates Genetic algorithm when once GA Grid has performed 30 generations. 
+ */ +public class KnapsackTerminateCriteria implements ITerminateCriteria { + /** Ignite instance */ + private static Ignite ignite = null; + + /** Ignite logger */ + private IgniteLogger igniteLogger = null; + + /** + * @param ignite Ignite + */ + public KnapsackTerminateCriteria(Ignite ignite) { + this.ignite = ignite; + this.igniteLogger = this.ignite.log(); + } + + /** + * @param fittestChromosome Most fit chromosome at for the nth generation + * @param averageFitnessScore Average fitness score as of the nth generation + * @param currentGeneration Current generation + * @return Boolean value + */ + public boolean isTerminationConditionMet(Chromosome fittestChromosome, double averageFitnessScore, + int currentGeneration) { + boolean isTerminate = true; + + igniteLogger.info("##########################################################################################"); + igniteLogger.info("Generation: " + currentGeneration); + igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome); + igniteLogger.info("Total value is: " + fittestChromosome.getFitnessScore()); + igniteLogger.info("Total weight is: " + calculateTotalWeight(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome))); + igniteLogger.info("Avg Chromosome Fitness: " + averageFitnessScore); + igniteLogger.info("Chromosome: " + fittestChromosome); + printItems(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome)); + igniteLogger.info("##########################################################################################"); + + if (!(currentGeneration > 29)) + isTerminate = false; + + return isTerminate; + } + + /** + * @param genes List of Genes + * @return double value + */ + private double calculateTotalWeight(List genes) { + double totalWeight = 0; + for (Gene gene : genes) + totalWeight = totalWeight + ((Item)gene.getValue()).getWeight(); + + return totalWeight; + } + + /** + * Helper to print items in knapsack + * + * @param genes List of Genes + */ + 
private void printItems(List genes) { + for (Gene gene : genes) { + igniteLogger.info("------------------------------------------------------------------------------------------"); + igniteLogger.info("Name: " + ((Item)gene.getValue()).getName().toString()); + igniteLogger.info("Weight: " + ((Item)gene.getValue()).getWeight()); + igniteLogger.info("Value: " + ((Item)gene.getValue()).getValue()); + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java index fd4afda4d84d7..38d27ff294896 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/Movie.java @@ -22,7 +22,6 @@ /** * POJO to model a movie. */ - public class Movie { /** name of movie */ private String name; @@ -125,7 +124,8 @@ public void setRating(String rating) { this.rating = rating; } - public String toString() { + /** {@inheritDoc} */ + @Override public String toString() { return "Movie [name=" + name + ", genre=" + genre + ", rating=" + rating + ", imdbRating=" + imdbRating + ", year=" + year + "]"; } diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java index 9690d692986aa..20e04ce38f41b 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieFitnessFunction.java @@ -45,9 +45,8 @@ * * fitness score. 
*/ - public class MovieFitnessFunction implements IFitnessFunction { - + /** genes */ private List genres = null; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java index f14cb83103d23..7276827c49ab7 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieGAExample.java @@ -20,10 +20,8 @@ import java.util.ArrayList; import java.util.List; import java.util.StringTokenizer; - import org.apache.ignite.Ignite; import org.apache.ignite.Ignition; - import org.apache.ignite.ml.genetic.Chromosome; import org.apache.ignite.ml.genetic.GAGrid; import org.apache.ignite.ml.genetic.Gene; @@ -40,15 +38,13 @@ * How To Run: * * mvn exec:java -Dexec.mainClass="org.apache.ignite.examples.ml.genetic.movie.MovieGAExample" -DGENRES=Action,Comedy - * - *

Remote nodes should always be started with special configuration file which enables P2P class loading: {@code - * 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

Alternatively you can run ExampleNodeStartup in - * another JVM which will start node with {@code examples/config/example-ignite.xml} configuration.

*/ - public class MovieGAExample { + /** Ignite instance */ private static Ignite ignite = null; + /** GAGrid */ private static GAGrid gaGrid = null; + /** GAConfiguration */ private static GAConfiguration gaConfig = null; /** diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java index 904c66dc67569..34a7331758203 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/genetic/movie/MovieTerminateCriteria.java @@ -32,10 +32,10 @@ * * Class terminates Genetic algorithm when fitnessScore > 32
*/ - public class MovieTerminateCriteria implements ITerminateCriteria { - + /** Ignite logger */ private IgniteLogger igniteLogger = null; + /** Ignite instance */ private Ignite ignite = null; /** @@ -61,7 +61,7 @@ public boolean isTerminationConditionMet(Chromosome fittestChromosome, double av igniteLogger.info("Generation: " + currentGeneration); igniteLogger.info("Fittest is Chromosome Key: " + fittestChromosome); igniteLogger.info("Chromsome: " + fittestChromosome); - printMovies(GAGridUtils.getGenesForChromosome(ignite, fittestChromosome)); + printMovies(GAGridUtils.getGenesInOrderForChromosome(ignite, fittestChromosome)); igniteLogger.info("##########################################################################################"); if (!(fittestChromosome.getFitnessScore() > 32)) { diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java index f3cdbbefc2a2a..15375a13ff306 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNClassificationExample.java @@ -17,9 +17,6 @@ package org.apache.ignite.examples.ml.knn; -import java.util.Arrays; -import java.util.UUID; -import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; @@ -27,7 +24,6 @@ import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.ScanQuery; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; import org.apache.ignite.ml.knn.classification.KNNClassificationModel; import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer; import org.apache.ignite.ml.knn.classification.KNNStrategy; @@ -35,6 +31,10 @@ import 
org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; import org.apache.ignite.thread.IgniteThread; +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + /** * Run kNN multi-class classification trainer over distributed dataset. * @@ -56,7 +56,8 @@ public static void main(String[] args) throws InterruptedException { KNNClassificationTrainer trainer = new KNNClassificationTrainer(); KNNClassificationModel knnMdl = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), + ignite, + dataCache, (k, v) -> Arrays.copyOfRange(v, 1, v.length), (k, v) -> v[0] ).withK(3) @@ -79,7 +80,7 @@ public static void main(String[] args) throws InterruptedException { double prediction = knnMdl.apply(new DenseLocalOnHeapVector(inputs)); totalAmount++; - if(groundTruth != prediction) + if (groundTruth != prediction) amountOfErrors++; System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); @@ -88,7 +89,7 @@ public static void main(String[] args) throws InterruptedException { System.out.println(">>> ---------------------------------"); System.out.println("\n>>> Absolute amount of errors " + amountOfErrors); - System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double)totalAmount)); + System.out.println("\n>>> Accuracy " + (1 - amountOfErrors / (double) totalAmount)); } }); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java new file mode 100644 index 0000000000000..76a07cd9ce244 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/knn/KNNRegressionExample.java @@ -0,0 +1,310 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.knn; + +import java.util.Arrays; +import java.util.UUID; +import javax.cache.Cache; +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; +import org.apache.ignite.ml.knn.classification.KNNClassificationTrainer; +import org.apache.ignite.ml.knn.classification.KNNStrategy; +import org.apache.ignite.ml.knn.regression.KNNRegressionModel; +import org.apache.ignite.ml.knn.regression.KNNRegressionTrainer; +import org.apache.ignite.ml.math.distances.ManhattanDistance; +import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; +import org.apache.ignite.thread.IgniteThread; + +/** + * Run kNN regression trainer over distributed dataset. + * + * @see KNNClassificationTrainer + */ +public class KNNRegressionExample { + /** Run example. */ + public static void main(String[] args) throws InterruptedException { + System.out.println(); + System.out.println(">>> kNN regression over cached dataset usage example started."); + // Start ignite grid. 
+ try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + KNNRegressionExample.class.getSimpleName(), () -> { + IgniteCache dataCache = getTestCache(ignite); + + KNNRegressionTrainer trainer = new KNNRegressionTrainer(); + + KNNRegressionModel knnMdl = (KNNRegressionModel) trainer.fit( + new CacheBasedDatasetBuilder<>(ignite, dataCache), + (k, v) -> Arrays.copyOfRange(v, 1, v.length), + (k, v) -> v[0] + ).withK(5) + .withDistanceMeasure(new ManhattanDistance()) + .withStrategy(KNNStrategy.WEIGHTED); + + int totalAmount = 0; + // Calculate mean squared error (MSE) + double mse = 0.0; + // Calculate mean absolute error (MAE) + double mae = 0.0; + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double prediction = knnMdl.apply(new DenseLocalOnHeapVector(inputs)); + + mse += Math.pow(prediction - groundTruth, 2.0); + mae += Math.abs(prediction - groundTruth); + + totalAmount++; + } + + mse = mse / totalAmount; + System.out.println("\n>>> Mean squared error (MSE) " + mse); + + mae = mae / totalAmount; + System.out.println("\n>>> Mean absolute error (MAE) " + mae); + } + }); + + igniteThread.start(); + igniteThread.join(); + } + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. 
+ */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int i = 0; i < data.length; i++) + cache.put(i, data[i]); + + return cache; + } + + /** The Iris dataset. */ + private static final double[][] data = { + {199, 125, 256, 6000, 256, 16, 128}, + {253, 29, 8000, 32000, 32, 8, 32}, + {132, 29, 8000, 16000, 32, 8, 16}, + {290, 26, 8000, 32000, 64, 8, 32}, + {381, 23, 16000, 32000, 64, 16, 32}, + {749, 23, 16000, 64000, 64, 16, 32}, + {1238, 23, 32000, 64000, 128, 32, 64}, + {23, 400, 1000, 3000, 0, 1, 2}, + {24, 400, 512, 3500, 4, 1, 6}, + {70, 60, 2000, 8000, 65, 1, 8}, + {117, 50, 4000, 16000, 65, 1, 8}, + {15, 350, 64, 64, 0, 1, 4}, + {64, 200, 512, 16000, 0, 4, 32}, + {23, 167, 524, 2000, 8, 4, 15}, + {29, 143, 512, 5000, 0, 7, 32}, + {22, 143, 1000, 2000, 0, 5, 16}, + {124, 110, 5000, 5000, 142, 8, 64}, + {35, 143, 1500, 6300, 0, 5, 32}, + {39, 143, 3100, 6200, 0, 5, 20}, + {40, 143, 2300, 6200, 0, 6, 64}, + {45, 110, 3100, 6200, 0, 6, 64}, + {28, 320, 128, 6000, 0, 1, 12}, + {21, 320, 512, 2000, 4, 1, 3}, + {28, 320, 256, 6000, 0, 1, 6}, + {22, 320, 256, 3000, 4, 1, 3}, + {28, 320, 512, 5000, 4, 1, 5}, + {27, 320, 256, 5000, 4, 1, 6}, + {102, 25, 1310, 2620, 131, 12, 24}, + {74, 50, 2620, 10480, 30, 12, 24}, + {138, 56, 5240, 20970, 30, 12, 24}, + {136, 64, 5240, 20970, 30, 12, 24}, + {23, 50, 500, 2000, 8, 1, 4}, + {29, 50, 1000, 4000, 8, 1, 5}, + {44, 50, 2000, 8000, 8, 1, 5}, + {30, 50, 1000, 4000, 8, 3, 5}, + {41, 50, 1000, 8000, 8, 3, 5}, + {74, 50, 2000, 16000, 8, 3, 5}, + {54, 133, 1000, 12000, 9, 3, 12}, + {41, 133, 1000, 8000, 9, 3, 12}, + {18, 810, 512, 512, 8, 1, 1}, + {28, 810, 1000, 5000, 0, 1, 1}, + {36, 320, 512, 8000, 4, 1, 5}, + {38, 200, 512, 8000, 8, 1, 8}, + 
{34, 700, 384, 8000, 0, 1, 1}, + {19, 700, 256, 2000, 0, 1, 1}, + {72, 140, 1000, 16000, 16, 1, 3}, + {36, 200, 1000, 8000, 0, 1, 2}, + {30, 110, 1000, 4000, 16, 1, 2}, + {56, 110, 1000, 12000, 16, 1, 2}, + {42, 220, 1000, 8000, 16, 1, 2}, + {34, 800, 256, 8000, 0, 1, 4}, + {19, 125, 512, 1000, 0, 8, 20}, + {75, 75, 2000, 8000, 64, 1, 38}, + {113, 75, 2000, 16000, 64, 1, 38}, + {157, 75, 2000, 16000, 128, 1, 38}, + {18, 90, 256, 1000, 0, 3, 10}, + {20, 105, 256, 2000, 0, 3, 10}, + {28, 105, 1000, 4000, 0, 3, 24}, + {33, 105, 2000, 4000, 8, 3, 19}, + {47, 75, 2000, 8000, 8, 3, 24}, + {54, 75, 3000, 8000, 8, 3, 48}, + {20, 175, 256, 2000, 0, 3, 24}, + {23, 300, 768, 3000, 0, 6, 24}, + {25, 300, 768, 3000, 6, 6, 24}, + {52, 300, 768, 12000, 6, 6, 24}, + {27, 300, 768, 4500, 0, 1, 24}, + {50, 300, 384, 12000, 6, 1, 24}, + {18, 300, 192, 768, 6, 6, 24}, + {53, 180, 768, 12000, 6, 1, 31}, + {23, 330, 1000, 3000, 0, 2, 4}, + {30, 300, 1000, 4000, 8, 3, 64}, + {73, 300, 1000, 16000, 8, 2, 112}, + {20, 330, 1000, 2000, 0, 1, 2}, + {25, 330, 1000, 4000, 0, 3, 6}, + {28, 140, 2000, 4000, 0, 3, 6}, + {29, 140, 2000, 4000, 0, 4, 8}, + {32, 140, 2000, 4000, 8, 1, 20}, + {175, 140, 2000, 32000, 32, 1, 20}, + {57, 140, 2000, 8000, 32, 1, 54}, + {181, 140, 2000, 32000, 32, 1, 54}, + {32, 140, 2000, 4000, 8, 1, 20}, + {82, 57, 4000, 16000, 1, 6, 12}, + {171, 57, 4000, 24000, 64, 12, 16}, + {361, 26, 16000, 32000, 64, 16, 24}, + {350, 26, 16000, 32000, 64, 8, 24}, + {220, 26, 8000, 32000, 0, 8, 24}, + {113, 26, 8000, 16000, 0, 8, 16}, + {15, 480, 96, 512, 0, 1, 1}, + {21, 203, 1000, 2000, 0, 1, 5}, + {35, 115, 512, 6000, 16, 1, 6}, + {18, 1100, 512, 1500, 0, 1, 1}, + {20, 1100, 768, 2000, 0, 1, 1}, + {20, 600, 768, 2000, 0, 1, 1}, + {28, 400, 2000, 4000, 0, 1, 1}, + {45, 400, 4000, 8000, 0, 1, 1}, + {18, 900, 1000, 1000, 0, 1, 2}, + {17, 900, 512, 1000, 0, 1, 2}, + {26, 900, 1000, 4000, 4, 1, 2}, + {28, 900, 1000, 4000, 8, 1, 2}, + {28, 900, 2000, 4000, 0, 3, 6}, + {31, 225, 2000, 
4000, 8, 3, 6}, + {42, 180, 2000, 8000, 8, 1, 6}, + {76, 185, 2000, 16000, 16, 1, 6}, + {76, 180, 2000, 16000, 16, 1, 6}, + {26, 225, 1000, 4000, 2, 3, 6}, + {59, 25, 2000, 12000, 8, 1, 4}, + {65, 25, 2000, 12000, 16, 3, 5}, + {101, 17, 4000, 16000, 8, 6, 12}, + {116, 17, 4000, 16000, 32, 6, 12}, + {18, 1500, 768, 1000, 0, 0, 0}, + {20, 1500, 768, 2000, 0, 0, 0}, + {20, 800, 768, 2000, 0, 0, 0}, + {30, 50, 2000, 4000, 0, 3, 6}, + {44, 50, 2000, 8000, 8, 3, 6}, + {82, 50, 2000, 16000, 24, 1, 6}, + {128, 50, 8000, 16000, 48, 1, 10}, + {37, 100, 1000, 8000, 0, 2, 6}, + {46, 100, 1000, 8000, 24, 2, 6}, + {46, 100, 1000, 8000, 24, 3, 6}, + {80, 50, 2000, 16000, 12, 3, 16}, + {88, 50, 2000, 16000, 24, 6, 16}, + {33, 150, 512, 4000, 0, 8, 128}, + {46, 115, 2000, 8000, 16, 1, 3}, + {29, 115, 2000, 4000, 2, 1, 5}, + {53, 92, 2000, 8000, 32, 1, 6}, + {41, 92, 2000, 8000, 4, 1, 6}, + {86, 75, 4000, 16000, 16, 1, 6}, + {95, 60, 4000, 16000, 32, 1, 6}, + {107, 60, 2000, 16000, 64, 5, 8}, + {117, 60, 4000, 16000, 64, 5, 8}, + {119, 50, 4000, 16000, 64, 5, 10}, + {120, 72, 4000, 16000, 64, 8, 16}, + {48, 72, 2000, 8000, 16, 6, 8}, + {126, 40, 8000, 16000, 32, 8, 16}, + {266, 40, 8000, 32000, 64, 8, 24}, + {270, 35, 8000, 32000, 64, 8, 24}, + {426, 38, 16000, 32000, 128, 16, 32}, + {151, 48, 4000, 24000, 32, 8, 24}, + {267, 38, 8000, 32000, 64, 8, 24}, + {603, 30, 16000, 32000, 256, 16, 24}, + {19, 112, 1000, 1000, 0, 1, 4}, + {21, 84, 1000, 2000, 0, 1, 6}, + {26, 56, 1000, 4000, 0, 1, 6}, + {35, 56, 2000, 6000, 0, 1, 8}, + {41, 56, 2000, 8000, 0, 1, 8}, + {47, 56, 4000, 8000, 0, 1, 8}, + {62, 56, 4000, 12000, 0, 1, 8}, + {78, 56, 4000, 16000, 0, 1, 8}, + {80, 38, 4000, 8000, 32, 16, 32}, + {142, 38, 8000, 16000, 64, 4, 8}, + {281, 38, 8000, 24000, 160, 4, 8}, + {190, 38, 4000, 16000, 128, 16, 32}, + {21, 200, 1000, 2000, 0, 1, 2}, + {25, 200, 1000, 4000, 0, 1, 4}, + {67, 200, 2000, 8000, 64, 1, 5}, + {24, 250, 512, 4000, 0, 1, 7}, + {24, 250, 512, 4000, 0, 4, 7}, + {64, 250, 
1000, 16000, 1, 1, 8}, + {25, 160, 512, 4000, 2, 1, 5}, + {20, 160, 512, 2000, 2, 3, 8}, + {29, 160, 1000, 4000, 8, 1, 14}, + {43, 160, 1000, 8000, 16, 1, 14}, + {53, 160, 2000, 8000, 32, 1, 13}, + {19, 240, 512, 1000, 8, 1, 3}, + {22, 240, 512, 2000, 8, 1, 5}, + {31, 105, 2000, 4000, 8, 3, 8}, + {41, 105, 2000, 6000, 16, 6, 16}, + {47, 105, 2000, 8000, 16, 4, 14}, + {99, 52, 4000, 16000, 32, 4, 12}, + {67, 70, 4000, 12000, 8, 6, 8}, + {81, 59, 4000, 12000, 32, 6, 12}, + {149, 59, 8000, 16000, 64, 12, 24}, + {183, 26, 8000, 24000, 32, 8, 16}, + {275, 26, 8000, 32000, 64, 12, 16}, + {382, 26, 8000, 32000, 128, 24, 32}, + {56, 116, 2000, 8000, 32, 5, 28}, + {182, 50, 2000, 32000, 24, 6, 26}, + {227, 50, 2000, 32000, 48, 26, 52}, + {341, 50, 2000, 32000, 112, 52, 104}, + {360, 50, 4000, 32000, 112, 52, 104}, + {919, 30, 8000, 64000, 96, 12, 176}, + {978, 30, 8000, 64000, 128, 12, 176}, + {24, 180, 262, 4000, 0, 1, 3}, + {37, 124, 1000, 8000, 0, 1, 8}, + {50, 98, 1000, 8000, 32, 2, 8}, + {41, 125, 2000, 8000, 0, 2, 14}, + {47, 480, 512, 8000, 32, 0, 0}, + {25, 480, 1000, 4000, 0, 0, 0} + }; +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java index efa1ba73d0401..4120f31ddd9d3 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/nn/MLPTrainerExample.java @@ -23,25 +23,21 @@ import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.examples.ExampleNodeStartup; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; import org.apache.ignite.ml.math.Matrix; import org.apache.ignite.ml.math.impls.matrix.DenseLocalOnHeapMatrix; import org.apache.ignite.ml.nn.Activators; import org.apache.ignite.ml.nn.MLPTrainer; import 
org.apache.ignite.ml.nn.MultilayerPerceptron; +import org.apache.ignite.ml.nn.UpdatesStrategy; import org.apache.ignite.ml.nn.architecture.MLPArchitecture; import org.apache.ignite.ml.optimization.LossFunctions; import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDParameterUpdate; import org.apache.ignite.ml.optimization.updatecalculators.SimpleGDUpdateCalculator; -import org.apache.ignite.ml.trainers.group.UpdatesStrategy; import org.apache.ignite.thread.IgniteThread; /** * Example of using distributed {@link MultilayerPerceptron}. - *

- * Remote nodes should always be started with special configuration file which - * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

- *

+ * * Alternatively you can run {@link ExampleNodeStartup} in another JVM which will start node * with {@code examples/config/example-ignite.xml} configuration.

*/ @@ -99,7 +95,8 @@ public static void main(String[] args) throws InterruptedException { // Train neural network and get multilayer perceptron model. MultilayerPerceptron mlp = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, trainingSet), + ignite, + trainingSet, (k, v) -> new double[] {v.x, v.y}, (k, v) -> new double[] {v.lb} ); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java index e0bcd089b674b..b2c4e128da332 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/preprocessing/NormalizationExample.java @@ -17,21 +17,19 @@ package org.apache.ignite.examples.ml.preprocessing; -import java.util.Arrays; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.examples.ml.dataset.model.Person; -import org.apache.ignite.ml.dataset.DatasetBuilder; import org.apache.ignite.ml.dataset.DatasetFactory; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; import org.apache.ignite.ml.dataset.primitive.SimpleDataset; import org.apache.ignite.ml.math.functions.IgniteBiFunction; -import org.apache.ignite.ml.preprocessing.normalization.NormalizationPreprocessor; import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer; +import java.util.Arrays; + /** * Example that shows how to use normalization preprocessor to normalize data. 
* @@ -47,8 +45,6 @@ public static void main(String[] args) throws Exception { IgniteCache persons = createCache(ignite); - DatasetBuilder builder = new CacheBasedDatasetBuilder<>(ignite, persons); - // Defines first preprocessor that extracts features from an upstream data. IgniteBiFunction featureExtractor = (k, v) -> new double[] { v.getAge(), @@ -56,14 +52,11 @@ public static void main(String[] args) throws Exception { }; // Defines second preprocessor that normalizes features. - NormalizationPreprocessor preprocessor = new NormalizationTrainer() - .fit(builder, featureExtractor, 2); + IgniteBiFunction preprocessor = new NormalizationTrainer() + .fit(ignite, persons, featureExtractor); // Creates a cache based simple dataset containing features and providing standard dataset API. - try (SimpleDataset dataset = DatasetFactory.createSimpleDataset( - builder, - preprocessor - )) { + try (SimpleDataset dataset = DatasetFactory.createSimpleDataset(ignite, persons, preprocessor)) { // Calculation of the mean value. This calculation will be performed in map-reduce manner. double[] mean = dataset.mean(); System.out.println("Mean \n\t" + Arrays.toString(mean)); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerAndNormalizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerAndNormalizationExample.java deleted file mode 100644 index 567a59975a385..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerAndNormalizationExample.java +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. 
- * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.regression.linear; - -import java.util.Arrays; -import java.util.UUID; -import javax.cache.Cache; -import org.apache.ignite.Ignite; -import org.apache.ignite.IgniteCache; -import org.apache.ignite.Ignition; -import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; -import org.apache.ignite.cache.query.QueryCursor; -import org.apache.ignite.cache.query.ScanQuery; -import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; -import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; -import org.apache.ignite.ml.preprocessing.normalization.NormalizationPreprocessor; -import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer; -import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; -import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; -import org.apache.ignite.thread.IgniteThread; - -/** - * Run linear regression model over distributed matrix. 
- * - * @see LinearRegressionLSQRTrainer - * @see NormalizationTrainer - * @see NormalizationPreprocessor - */ -public class DistributedLinearRegressionWithLSQRTrainerAndNormalizationExample { - /** */ - private static final double[][] data = { - {8, 78, 284, 9.100000381, 109}, - {9.300000191, 68, 433, 8.699999809, 144}, - {7.5, 70, 739, 7.199999809, 113}, - {8.899999619, 96, 1792, 8.899999619, 97}, - {10.19999981, 74, 477, 8.300000191, 206}, - {8.300000191, 111, 362, 10.89999962, 124}, - {8.800000191, 77, 671, 10, 152}, - {8.800000191, 168, 636, 9.100000381, 162}, - {10.69999981, 82, 329, 8.699999809, 150}, - {11.69999981, 89, 634, 7.599999905, 134}, - {8.5, 149, 631, 10.80000019, 292}, - {8.300000191, 60, 257, 9.5, 108}, - {8.199999809, 96, 284, 8.800000191, 111}, - {7.900000095, 83, 603, 9.5, 182}, - {10.30000019, 130, 686, 8.699999809, 129}, - {7.400000095, 145, 345, 11.19999981, 158}, - {9.600000381, 112, 1357, 9.699999809, 186}, - {9.300000191, 131, 544, 9.600000381, 177}, - {10.60000038, 80, 205, 9.100000381, 127}, - {9.699999809, 130, 1264, 9.199999809, 179}, - {11.60000038, 140, 688, 8.300000191, 80}, - {8.100000381, 154, 354, 8.399999619, 103}, - {9.800000191, 118, 1632, 9.399999619, 101}, - {7.400000095, 94, 348, 9.800000191, 117}, - {9.399999619, 119, 370, 10.39999962, 88}, - {11.19999981, 153, 648, 9.899999619, 78}, - {9.100000381, 116, 366, 9.199999809, 102}, - {10.5, 97, 540, 10.30000019, 95}, - {11.89999962, 176, 680, 8.899999619, 80}, - {8.399999619, 75, 345, 9.600000381, 92}, - {5, 134, 525, 10.30000019, 126}, - {9.800000191, 161, 870, 10.39999962, 108}, - {9.800000191, 111, 669, 9.699999809, 77}, - {10.80000019, 114, 452, 9.600000381, 60}, - {10.10000038, 142, 430, 10.69999981, 71}, - {10.89999962, 238, 822, 10.30000019, 86}, - {9.199999809, 78, 190, 10.69999981, 93}, - {8.300000191, 196, 867, 9.600000381, 106}, - {7.300000191, 125, 969, 10.5, 162}, - {9.399999619, 82, 499, 7.699999809, 95}, - {9.399999619, 125, 925, 10.19999981, 91}, - 
{9.800000191, 129, 353, 9.899999619, 52}, - {3.599999905, 84, 288, 8.399999619, 110}, - {8.399999619, 183, 718, 10.39999962, 69}, - {10.80000019, 119, 540, 9.199999809, 57}, - {10.10000038, 180, 668, 13, 106}, - {9, 82, 347, 8.800000191, 40}, - {10, 71, 345, 9.199999809, 50}, - {11.30000019, 118, 463, 7.800000191, 35}, - {11.30000019, 121, 728, 8.199999809, 86}, - {12.80000019, 68, 383, 7.400000095, 57}, - {10, 112, 316, 10.39999962, 57}, - {6.699999809, 109, 388, 8.899999619, 94} - }; - - /** Run example. */ - public static void main(String[] args) throws InterruptedException { - System.out.println(); - System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - - // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread - // because we create ignite cache internally. 
- IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - SparseDistributedMatrixExample.class.getSimpleName(), () -> { - IgniteCache dataCache = getTestCache(ignite); - - System.out.println(">>> Create new normalization trainer object."); - NormalizationTrainer normalizationTrainer = new NormalizationTrainer<>(); - - System.out.println(">>> Perform the training to get the normalization preprocessor."); - NormalizationPreprocessor preprocessor = normalizationTrainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), - (k, v) -> Arrays.copyOfRange(v, 1, v.length), - 4 - ); - - System.out.println(">>> Create new linear regression trainer object."); - LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer(); - - System.out.println(">>> Perform the training to get the model."); - LinearRegressionModel mdl = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), - preprocessor, - (k, v) -> v[0] - ); - - System.out.println(">>> Linear regression model: " + mdl); - - System.out.println(">>> ---------------------------------"); - System.out.println(">>> | Prediction\t| Ground Truth\t|"); - System.out.println(">>> ---------------------------------"); - - try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { - for (Cache.Entry observation : observations) { - Integer key = observation.getKey(); - double[] val = observation.getValue(); - double groundTruth = val[0]; - - double prediction = mdl.apply(new DenseLocalOnHeapVector(preprocessor.apply(key, val))); - - System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); - } - } - - System.out.println(">>> ---------------------------------"); - }); - - igniteThread.start(); - - igniteThread.join(); - } - } - - /** - * Fills cache with data and returns it. - * - * @param ignite Ignite instance. - * @return Filled Ignite Cache. 
- */ - private static IgniteCache getTestCache(Ignite ignite) { - CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); - cacheConfiguration.setName("TEST_" + UUID.randomUUID()); - cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); - - IgniteCache cache = ignite.createCache(cacheConfiguration); - - for (int i = 0; i < data.length; i++) - cache.put(i, data[i]); - - return cache; - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerExample.java deleted file mode 100644 index a853092f92a08..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithLSQRTrainerExample.java +++ /dev/null @@ -1,169 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.examples.ml.regression.linear; - -import java.util.Arrays; -import java.util.UUID; -import javax.cache.Cache; -import org.apache.ignite.Ignite; -import org.apache.ignite.IgniteCache; -import org.apache.ignite.Ignition; -import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; -import org.apache.ignite.cache.query.QueryCursor; -import org.apache.ignite.cache.query.ScanQuery; -import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; -import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; -import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; -import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; -import org.apache.ignite.thread.IgniteThread; - -/** - * Run linear regression model over distributed matrix. - * - * @see LinearRegressionLSQRTrainer - */ -public class DistributedLinearRegressionWithLSQRTrainerExample { - /** */ - private static final double[][] data = { - {8, 78, 284, 9.100000381, 109}, - {9.300000191, 68, 433, 8.699999809, 144}, - {7.5, 70, 739, 7.199999809, 113}, - {8.899999619, 96, 1792, 8.899999619, 97}, - {10.19999981, 74, 477, 8.300000191, 206}, - {8.300000191, 111, 362, 10.89999962, 124}, - {8.800000191, 77, 671, 10, 152}, - {8.800000191, 168, 636, 9.100000381, 162}, - {10.69999981, 82, 329, 8.699999809, 150}, - {11.69999981, 89, 634, 7.599999905, 134}, - {8.5, 149, 631, 10.80000019, 292}, - {8.300000191, 60, 257, 9.5, 108}, - {8.199999809, 96, 284, 8.800000191, 111}, - {7.900000095, 83, 603, 9.5, 182}, - {10.30000019, 130, 686, 8.699999809, 129}, - {7.400000095, 145, 345, 11.19999981, 158}, - {9.600000381, 112, 1357, 9.699999809, 186}, - {9.300000191, 131, 544, 9.600000381, 177}, - {10.60000038, 80, 205, 9.100000381, 127}, - {9.699999809, 130, 1264, 9.199999809, 179}, - {11.60000038, 
140, 688, 8.300000191, 80}, - {8.100000381, 154, 354, 8.399999619, 103}, - {9.800000191, 118, 1632, 9.399999619, 101}, - {7.400000095, 94, 348, 9.800000191, 117}, - {9.399999619, 119, 370, 10.39999962, 88}, - {11.19999981, 153, 648, 9.899999619, 78}, - {9.100000381, 116, 366, 9.199999809, 102}, - {10.5, 97, 540, 10.30000019, 95}, - {11.89999962, 176, 680, 8.899999619, 80}, - {8.399999619, 75, 345, 9.600000381, 92}, - {5, 134, 525, 10.30000019, 126}, - {9.800000191, 161, 870, 10.39999962, 108}, - {9.800000191, 111, 669, 9.699999809, 77}, - {10.80000019, 114, 452, 9.600000381, 60}, - {10.10000038, 142, 430, 10.69999981, 71}, - {10.89999962, 238, 822, 10.30000019, 86}, - {9.199999809, 78, 190, 10.69999981, 93}, - {8.300000191, 196, 867, 9.600000381, 106}, - {7.300000191, 125, 969, 10.5, 162}, - {9.399999619, 82, 499, 7.699999809, 95}, - {9.399999619, 125, 925, 10.19999981, 91}, - {9.800000191, 129, 353, 9.899999619, 52}, - {3.599999905, 84, 288, 8.399999619, 110}, - {8.399999619, 183, 718, 10.39999962, 69}, - {10.80000019, 119, 540, 9.199999809, 57}, - {10.10000038, 180, 668, 13, 106}, - {9, 82, 347, 8.800000191, 40}, - {10, 71, 345, 9.199999809, 50}, - {11.30000019, 118, 463, 7.800000191, 35}, - {11.30000019, 121, 728, 8.199999809, 86}, - {12.80000019, 68, 383, 7.400000095, 57}, - {10, 112, 316, 10.39999962, 57}, - {6.699999809, 109, 388, 8.899999619, 94} - }; - - /** Run example. */ - public static void main(String[] args) throws InterruptedException { - System.out.println(); - System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - - // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread - // because we create ignite cache internally. 
- IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - SparseDistributedMatrixExample.class.getSimpleName(), () -> { - IgniteCache dataCache = getTestCache(ignite); - - System.out.println(">>> Create new linear regression trainer object."); - LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer(); - - System.out.println(">>> Perform the training to get the model."); - LinearRegressionModel mdl = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), - (k, v) -> Arrays.copyOfRange(v, 1, v.length), - (k, v) -> v[0] - ); - - System.out.println(">>> Linear regression model: " + mdl); - - System.out.println(">>> ---------------------------------"); - System.out.println(">>> | Prediction\t| Ground Truth\t|"); - System.out.println(">>> ---------------------------------"); - - try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { - for (Cache.Entry observation : observations) { - double[] val = observation.getValue(); - double[] inputs = Arrays.copyOfRange(val, 1, val.length); - double groundTruth = val[0]; - - double prediction = mdl.apply(new DenseLocalOnHeapVector(inputs)); - - System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); - } - } - - System.out.println(">>> ---------------------------------"); - }); - - igniteThread.start(); - - igniteThread.join(); - } - } - - /** - * Fills cache with data and returns it. - * - * @param ignite Ignite instance. - * @return Filled Ignite Cache. 
- */ - private static IgniteCache getTestCache(Ignite ignite) { - CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); - cacheConfiguration.setName("TEST_" + UUID.randomUUID()); - cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); - - IgniteCache cache = ignite.createCache(cacheConfiguration); - - for (int i = 0; i < data.length; i++) - cache.put(i, data[i]); - - return cache; - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithQRTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithQRTrainerExample.java deleted file mode 100644 index 2b45aa26b87bb..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithQRTrainerExample.java +++ /dev/null @@ -1,136 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.examples.ml.regression.linear; - -import java.util.Arrays; -import org.apache.ignite.Ignite; -import org.apache.ignite.Ignition; -import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; -import org.apache.ignite.ml.Trainer; -import org.apache.ignite.ml.math.Matrix; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.impls.matrix.SparseDistributedMatrix; -import org.apache.ignite.ml.math.impls.vector.SparseDistributedVector; -import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; -import org.apache.ignite.ml.regressions.linear.LinearRegressionQRTrainer; -import org.apache.ignite.thread.IgniteThread; - -/** - * Run linear regression model over distributed matrix. - * - * @see LinearRegressionQRTrainer - */ -public class DistributedLinearRegressionWithQRTrainerExample { - /** */ - private static final double[][] data = { - {8, 78, 284, 9.100000381, 109}, - {9.300000191, 68, 433, 8.699999809, 144}, - {7.5, 70, 739, 7.199999809, 113}, - {8.899999619, 96, 1792, 8.899999619, 97}, - {10.19999981, 74, 477, 8.300000191, 206}, - {8.300000191, 111, 362, 10.89999962, 124}, - {8.800000191, 77, 671, 10, 152}, - {8.800000191, 168, 636, 9.100000381, 162}, - {10.69999981, 82, 329, 8.699999809, 150}, - {11.69999981, 89, 634, 7.599999905, 134}, - {8.5, 149, 631, 10.80000019, 292}, - {8.300000191, 60, 257, 9.5, 108}, - {8.199999809, 96, 284, 8.800000191, 111}, - {7.900000095, 83, 603, 9.5, 182}, - {10.30000019, 130, 686, 8.699999809, 129}, - {7.400000095, 145, 345, 11.19999981, 158}, - {9.600000381, 112, 1357, 9.699999809, 186}, - {9.300000191, 131, 544, 9.600000381, 177}, - {10.60000038, 80, 205, 9.100000381, 127}, - {9.699999809, 130, 1264, 9.199999809, 179}, - {11.60000038, 140, 688, 8.300000191, 80}, - {8.100000381, 154, 354, 8.399999619, 103}, - {9.800000191, 118, 1632, 9.399999619, 101}, - {7.400000095, 94, 348, 9.800000191, 117}, - {9.399999619, 119, 370, 10.39999962, 88}, - 
{11.19999981, 153, 648, 9.899999619, 78}, - {9.100000381, 116, 366, 9.199999809, 102}, - {10.5, 97, 540, 10.30000019, 95}, - {11.89999962, 176, 680, 8.899999619, 80}, - {8.399999619, 75, 345, 9.600000381, 92}, - {5, 134, 525, 10.30000019, 126}, - {9.800000191, 161, 870, 10.39999962, 108}, - {9.800000191, 111, 669, 9.699999809, 77}, - {10.80000019, 114, 452, 9.600000381, 60}, - {10.10000038, 142, 430, 10.69999981, 71}, - {10.89999962, 238, 822, 10.30000019, 86}, - {9.199999809, 78, 190, 10.69999981, 93}, - {8.300000191, 196, 867, 9.600000381, 106}, - {7.300000191, 125, 969, 10.5, 162}, - {9.399999619, 82, 499, 7.699999809, 95}, - {9.399999619, 125, 925, 10.19999981, 91}, - {9.800000191, 129, 353, 9.899999619, 52}, - {3.599999905, 84, 288, 8.399999619, 110}, - {8.399999619, 183, 718, 10.39999962, 69}, - {10.80000019, 119, 540, 9.199999809, 57}, - {10.10000038, 180, 668, 13, 106}, - {9, 82, 347, 8.800000191, 40}, - {10, 71, 345, 9.199999809, 50}, - {11.30000019, 118, 463, 7.800000191, 35}, - {11.30000019, 121, 728, 8.199999809, 86}, - {12.80000019, 68, 383, 7.400000095, 57}, - {10, 112, 316, 10.39999962, 57}, - {6.699999809, 109, 388, 8.899999619, 94} - }; - - /** Run example. */ - public static void main(String[] args) throws InterruptedException { - System.out.println(); - System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread - // because we create ignite cache internally. - IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - SparseDistributedMatrixExample.class.getSimpleName(), () -> { - - // Create SparseDistributedMatrix, new cache will be created automagically. 
- System.out.println(">>> Create new SparseDistributedMatrix inside IgniteThread."); - SparseDistributedMatrix distributedMatrix = new SparseDistributedMatrix(data); - - System.out.println(">>> Create new linear regression trainer object."); - Trainer trainer = new LinearRegressionQRTrainer(); - - System.out.println(">>> Perform the training to get the model."); - LinearRegressionModel model = trainer.train(distributedMatrix); - System.out.println(">>> Linear regression model: " + model); - - System.out.println(">>> ---------------------------------"); - System.out.println(">>> | Prediction\t| Ground Truth\t|"); - System.out.println(">>> ---------------------------------"); - for (double[] observation : data) { - Vector inputs = new SparseDistributedVector(Arrays.copyOfRange(observation, 1, observation.length)); - double prediction = model.apply(inputs); - double groundTruth = observation[0]; - System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); - } - System.out.println(">>> ---------------------------------"); - }); - - igniteThread.start(); - - igniteThread.join(); - } - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithSGDTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithSGDTrainerExample.java deleted file mode 100644 index f3b2655167dc4..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/DistributedLinearRegressionWithSGDTrainerExample.java +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.examples.ml.regression.linear; - -import java.util.Arrays; -import org.apache.ignite.Ignite; -import org.apache.ignite.Ignition; -import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; -import org.apache.ignite.ml.Trainer; -import org.apache.ignite.ml.math.Matrix; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.impls.matrix.SparseDistributedMatrix; -import org.apache.ignite.ml.math.impls.vector.SparseDistributedVector; -import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; -import org.apache.ignite.ml.regressions.linear.LinearRegressionQRTrainer; -import org.apache.ignite.ml.regressions.linear.LinearRegressionSGDTrainer; -import org.apache.ignite.thread.IgniteThread; - -/** - * Run linear regression model over distributed matrix. 
- * - * @see LinearRegressionQRTrainer - */ -public class DistributedLinearRegressionWithSGDTrainerExample { - /** */ - private static final double[][] data = { - {8, 78, 284, 9.100000381, 109}, - {9.300000191, 68, 433, 8.699999809, 144}, - {7.5, 70, 739, 7.199999809, 113}, - {8.899999619, 96, 1792, 8.899999619, 97}, - {10.19999981, 74, 477, 8.300000191, 206}, - {8.300000191, 111, 362, 10.89999962, 124}, - {8.800000191, 77, 671, 10, 152}, - {8.800000191, 168, 636, 9.100000381, 162}, - {10.69999981, 82, 329, 8.699999809, 150}, - {11.69999981, 89, 634, 7.599999905, 134}, - {8.5, 149, 631, 10.80000019, 292}, - {8.300000191, 60, 257, 9.5, 108}, - {8.199999809, 96, 284, 8.800000191, 111}, - {7.900000095, 83, 603, 9.5, 182}, - {10.30000019, 130, 686, 8.699999809, 129}, - {7.400000095, 145, 345, 11.19999981, 158}, - {9.600000381, 112, 1357, 9.699999809, 186}, - {9.300000191, 131, 544, 9.600000381, 177}, - {10.60000038, 80, 205, 9.100000381, 127}, - {9.699999809, 130, 1264, 9.199999809, 179}, - {11.60000038, 140, 688, 8.300000191, 80}, - {8.100000381, 154, 354, 8.399999619, 103}, - {9.800000191, 118, 1632, 9.399999619, 101}, - {7.400000095, 94, 348, 9.800000191, 117}, - {9.399999619, 119, 370, 10.39999962, 88}, - {11.19999981, 153, 648, 9.899999619, 78}, - {9.100000381, 116, 366, 9.199999809, 102}, - {10.5, 97, 540, 10.30000019, 95}, - {11.89999962, 176, 680, 8.899999619, 80}, - {8.399999619, 75, 345, 9.600000381, 92}, - {5, 134, 525, 10.30000019, 126}, - {9.800000191, 161, 870, 10.39999962, 108}, - {9.800000191, 111, 669, 9.699999809, 77}, - {10.80000019, 114, 452, 9.600000381, 60}, - {10.10000038, 142, 430, 10.69999981, 71}, - {10.89999962, 238, 822, 10.30000019, 86}, - {9.199999809, 78, 190, 10.69999981, 93}, - {8.300000191, 196, 867, 9.600000381, 106}, - {7.300000191, 125, 969, 10.5, 162}, - {9.399999619, 82, 499, 7.699999809, 95}, - {9.399999619, 125, 925, 10.19999981, 91}, - {9.800000191, 129, 353, 9.899999619, 52}, - {3.599999905, 84, 288, 8.399999619, 110}, - 
{8.399999619, 183, 718, 10.39999962, 69}, - {10.80000019, 119, 540, 9.199999809, 57}, - {10.10000038, 180, 668, 13, 106}, - {9, 82, 347, 8.800000191, 40}, - {10, 71, 345, 9.199999809, 50}, - {11.30000019, 118, 463, 7.800000191, 35}, - {11.30000019, 121, 728, 8.199999809, 86}, - {12.80000019, 68, 383, 7.400000095, 57}, - {10, 112, 316, 10.39999962, 57}, - {6.699999809, 109, 388, 8.899999619, 94} - }; - - /** Run example. */ - public static void main(String[] args) throws InterruptedException { - System.out.println(); - System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); - // Start ignite grid. - try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { - System.out.println(">>> Ignite grid started."); - // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread - // because we create ignite cache internally. - IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), - SparseDistributedMatrixExample.class.getSimpleName(), () -> { - - // Create SparseDistributedMatrix, new cache will be created automagically. 
- System.out.println(">>> Create new SparseDistributedMatrix inside IgniteThread."); - SparseDistributedMatrix distributedMatrix = new SparseDistributedMatrix(data); - - System.out.println(">>> Create new linear regression trainer object."); - Trainer trainer = new LinearRegressionSGDTrainer(100_000, 1e-12); - - System.out.println(">>> Perform the training to get the model."); - LinearRegressionModel model = trainer.train(distributedMatrix); - System.out.println(">>> Linear regression model: " + model); - - System.out.println(">>> ---------------------------------"); - System.out.println(">>> | Prediction\t| Ground Truth\t|"); - System.out.println(">>> ---------------------------------"); - for (double[] observation : data) { - Vector inputs = new SparseDistributedVector(Arrays.copyOfRange(observation, 1, observation.length)); - double prediction = model.apply(inputs); - double groundTruth = observation[0]; - System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); - } - System.out.println(">>> ---------------------------------"); - }); - - igniteThread.start(); - - igniteThread.join(); - } - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java new file mode 100644 index 0000000000000..276d43fcd67ef --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerExample.java @@ -0,0 +1,169 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.regression.linear; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; +import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; +import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; +import org.apache.ignite.thread.IgniteThread; + +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + +/** + * Run linear regression model over distributed matrix. 
+ * + * @see LinearRegressionLSQRTrainer + */ +public class LinearRegressionLSQRTrainerExample { + /** */ + private static final double[][] data = { + {8, 78, 284, 9.100000381, 109}, + {9.300000191, 68, 433, 8.699999809, 144}, + {7.5, 70, 739, 7.199999809, 113}, + {8.899999619, 96, 1792, 8.899999619, 97}, + {10.19999981, 74, 477, 8.300000191, 206}, + {8.300000191, 111, 362, 10.89999962, 124}, + {8.800000191, 77, 671, 10, 152}, + {8.800000191, 168, 636, 9.100000381, 162}, + {10.69999981, 82, 329, 8.699999809, 150}, + {11.69999981, 89, 634, 7.599999905, 134}, + {8.5, 149, 631, 10.80000019, 292}, + {8.300000191, 60, 257, 9.5, 108}, + {8.199999809, 96, 284, 8.800000191, 111}, + {7.900000095, 83, 603, 9.5, 182}, + {10.30000019, 130, 686, 8.699999809, 129}, + {7.400000095, 145, 345, 11.19999981, 158}, + {9.600000381, 112, 1357, 9.699999809, 186}, + {9.300000191, 131, 544, 9.600000381, 177}, + {10.60000038, 80, 205, 9.100000381, 127}, + {9.699999809, 130, 1264, 9.199999809, 179}, + {11.60000038, 140, 688, 8.300000191, 80}, + {8.100000381, 154, 354, 8.399999619, 103}, + {9.800000191, 118, 1632, 9.399999619, 101}, + {7.400000095, 94, 348, 9.800000191, 117}, + {9.399999619, 119, 370, 10.39999962, 88}, + {11.19999981, 153, 648, 9.899999619, 78}, + {9.100000381, 116, 366, 9.199999809, 102}, + {10.5, 97, 540, 10.30000019, 95}, + {11.89999962, 176, 680, 8.899999619, 80}, + {8.399999619, 75, 345, 9.600000381, 92}, + {5, 134, 525, 10.30000019, 126}, + {9.800000191, 161, 870, 10.39999962, 108}, + {9.800000191, 111, 669, 9.699999809, 77}, + {10.80000019, 114, 452, 9.600000381, 60}, + {10.10000038, 142, 430, 10.69999981, 71}, + {10.89999962, 238, 822, 10.30000019, 86}, + {9.199999809, 78, 190, 10.69999981, 93}, + {8.300000191, 196, 867, 9.600000381, 106}, + {7.300000191, 125, 969, 10.5, 162}, + {9.399999619, 82, 499, 7.699999809, 95}, + {9.399999619, 125, 925, 10.19999981, 91}, + {9.800000191, 129, 353, 9.899999619, 52}, + {3.599999905, 84, 288, 8.399999619, 110}, + {8.399999619, 
183, 718, 10.39999962, 69}, + {10.80000019, 119, 540, 9.199999809, 57}, + {10.10000038, 180, 668, 13, 106}, + {9, 82, 347, 8.800000191, 40}, + {10, 71, 345, 9.199999809, 50}, + {11.30000019, 118, 463, 7.800000191, 35}, + {11.30000019, 121, 728, 8.199999809, 86}, + {12.80000019, 68, 383, 7.400000095, 57}, + {10, 112, 316, 10.39999962, 57}, + {6.699999809, 109, 388, 8.899999619, 94} + }; + + /** Run example. */ + public static void main(String[] args) throws InterruptedException { + System.out.println(); + System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread + // because we create ignite cache internally. + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + LinearRegressionLSQRTrainerExample.class.getSimpleName(), () -> { + IgniteCache dataCache = getTestCache(ignite); + + System.out.println(">>> Create new linear regression trainer object."); + LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer(); + + System.out.println(">>> Perform the training to get the model."); + LinearRegressionModel mdl = trainer.fit( + ignite, + dataCache, + (k, v) -> Arrays.copyOfRange(v, 1, v.length), + (k, v) -> v[0] + ); + + System.out.println(">>> Linear regression model: " + mdl); + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double 
prediction = mdl.apply(new DenseLocalOnHeapVector(inputs)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + } + + System.out.println(">>> ---------------------------------"); + }); + + igniteThread.start(); + + igniteThread.join(); + } + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. + */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int i = 0; i < data.length; i++) + cache.put(i, data[i]); + + return cache; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithNormalizationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithNormalizationExample.java new file mode 100644 index 0000000000000..0358f44135e00 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionLSQRTrainerWithNormalizationExample.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.regression.linear; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.examples.ml.math.matrix.SparseDistributedMatrixExample; +import org.apache.ignite.ml.math.functions.IgniteBiFunction; +import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; +import org.apache.ignite.ml.preprocessing.normalization.NormalizationPreprocessor; +import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer; +import org.apache.ignite.ml.regressions.linear.LinearRegressionLSQRTrainer; +import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; +import org.apache.ignite.thread.IgniteThread; + +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + +/** + * Run linear regression model over distributed matrix. 
+ * + * @see LinearRegressionLSQRTrainer + * @see NormalizationTrainer + * @see NormalizationPreprocessor + */ +public class LinearRegressionLSQRTrainerWithNormalizationExample { + /** */ + private static final double[][] data = { + {8, 78, 284, 9.100000381, 109}, + {9.300000191, 68, 433, 8.699999809, 144}, + {7.5, 70, 739, 7.199999809, 113}, + {8.899999619, 96, 1792, 8.899999619, 97}, + {10.19999981, 74, 477, 8.300000191, 206}, + {8.300000191, 111, 362, 10.89999962, 124}, + {8.800000191, 77, 671, 10, 152}, + {8.800000191, 168, 636, 9.100000381, 162}, + {10.69999981, 82, 329, 8.699999809, 150}, + {11.69999981, 89, 634, 7.599999905, 134}, + {8.5, 149, 631, 10.80000019, 292}, + {8.300000191, 60, 257, 9.5, 108}, + {8.199999809, 96, 284, 8.800000191, 111}, + {7.900000095, 83, 603, 9.5, 182}, + {10.30000019, 130, 686, 8.699999809, 129}, + {7.400000095, 145, 345, 11.19999981, 158}, + {9.600000381, 112, 1357, 9.699999809, 186}, + {9.300000191, 131, 544, 9.600000381, 177}, + {10.60000038, 80, 205, 9.100000381, 127}, + {9.699999809, 130, 1264, 9.199999809, 179}, + {11.60000038, 140, 688, 8.300000191, 80}, + {8.100000381, 154, 354, 8.399999619, 103}, + {9.800000191, 118, 1632, 9.399999619, 101}, + {7.400000095, 94, 348, 9.800000191, 117}, + {9.399999619, 119, 370, 10.39999962, 88}, + {11.19999981, 153, 648, 9.899999619, 78}, + {9.100000381, 116, 366, 9.199999809, 102}, + {10.5, 97, 540, 10.30000019, 95}, + {11.89999962, 176, 680, 8.899999619, 80}, + {8.399999619, 75, 345, 9.600000381, 92}, + {5, 134, 525, 10.30000019, 126}, + {9.800000191, 161, 870, 10.39999962, 108}, + {9.800000191, 111, 669, 9.699999809, 77}, + {10.80000019, 114, 452, 9.600000381, 60}, + {10.10000038, 142, 430, 10.69999981, 71}, + {10.89999962, 238, 822, 10.30000019, 86}, + {9.199999809, 78, 190, 10.69999981, 93}, + {8.300000191, 196, 867, 9.600000381, 106}, + {7.300000191, 125, 969, 10.5, 162}, + {9.399999619, 82, 499, 7.699999809, 95}, + {9.399999619, 125, 925, 10.19999981, 91}, + {9.800000191, 129, 353, 
9.899999619, 52}, + {3.599999905, 84, 288, 8.399999619, 110}, + {8.399999619, 183, 718, 10.39999962, 69}, + {10.80000019, 119, 540, 9.199999809, 57}, + {10.10000038, 180, 668, 13, 106}, + {9, 82, 347, 8.800000191, 40}, + {10, 71, 345, 9.199999809, 50}, + {11.30000019, 118, 463, 7.800000191, 35}, + {11.30000019, 121, 728, 8.199999809, 86}, + {12.80000019, 68, 383, 7.400000095, 57}, + {10, 112, 316, 10.39999962, 57}, + {6.699999809, 109, 388, 8.899999619, 94} + }; + + /** Run example. */ + public static void main(String[] args) throws InterruptedException { + System.out.println(); + System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread + // because we create ignite cache internally. + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + SparseDistributedMatrixExample.class.getSimpleName(), () -> { + IgniteCache dataCache = getTestCache(ignite); + + System.out.println(">>> Create new normalization trainer object."); + NormalizationTrainer normalizationTrainer = new NormalizationTrainer<>(); + + System.out.println(">>> Perform the training to get the normalization preprocessor."); + IgniteBiFunction preprocessor = normalizationTrainer.fit( + ignite, + dataCache, + (k, v) -> Arrays.copyOfRange(v, 1, v.length) + ); + + System.out.println(">>> Create new linear regression trainer object."); + LinearRegressionLSQRTrainer trainer = new LinearRegressionLSQRTrainer(); + + System.out.println(">>> Perform the training to get the model."); + LinearRegressionModel mdl = trainer.fit(ignite, dataCache, preprocessor, (k, v) -> v[0]); + + System.out.println(">>> Linear regression model: " + mdl); + + System.out.println(">>> 
---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + Integer key = observation.getKey(); + double[] val = observation.getValue(); + double groundTruth = val[0]; + + double prediction = mdl.apply(new DenseLocalOnHeapVector(preprocessor.apply(key, val))); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + } + + System.out.println(">>> ---------------------------------"); + }); + + igniteThread.start(); + + igniteThread.join(); + } + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. + */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int i = 0; i < data.length; i++) + cache.put(i, data[i]); + + return cache; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java new file mode 100644 index 0000000000000..ce6ad3b4df287 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/regression/linear/LinearRegressionSGDTrainerExample.java @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.regression.linear; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cache.query.QueryCursor; +import org.apache.ignite.cache.query.ScanQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; +import org.apache.ignite.ml.optimization.updatecalculators.RPropParameterUpdate; +import org.apache.ignite.ml.optimization.updatecalculators.RPropUpdateCalculator; +import org.apache.ignite.ml.regressions.linear.LinearRegressionModel; +import org.apache.ignite.ml.regressions.linear.LinearRegressionSGDTrainer; +import org.apache.ignite.ml.nn.UpdatesStrategy; +import org.apache.ignite.thread.IgniteThread; + +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + +/** + * Run linear regression model over distributed matrix. 
+ * + * @see LinearRegressionSGDTrainer + */ +public class LinearRegressionSGDTrainerExample { + /** */ + private static final double[][] data = { + {8, 78, 284, 9.100000381, 109}, + {9.300000191, 68, 433, 8.699999809, 144}, + {7.5, 70, 739, 7.199999809, 113}, + {8.899999619, 96, 1792, 8.899999619, 97}, + {10.19999981, 74, 477, 8.300000191, 206}, + {8.300000191, 111, 362, 10.89999962, 124}, + {8.800000191, 77, 671, 10, 152}, + {8.800000191, 168, 636, 9.100000381, 162}, + {10.69999981, 82, 329, 8.699999809, 150}, + {11.69999981, 89, 634, 7.599999905, 134}, + {8.5, 149, 631, 10.80000019, 292}, + {8.300000191, 60, 257, 9.5, 108}, + {8.199999809, 96, 284, 8.800000191, 111}, + {7.900000095, 83, 603, 9.5, 182}, + {10.30000019, 130, 686, 8.699999809, 129}, + {7.400000095, 145, 345, 11.19999981, 158}, + {9.600000381, 112, 1357, 9.699999809, 186}, + {9.300000191, 131, 544, 9.600000381, 177}, + {10.60000038, 80, 205, 9.100000381, 127}, + {9.699999809, 130, 1264, 9.199999809, 179}, + {11.60000038, 140, 688, 8.300000191, 80}, + {8.100000381, 154, 354, 8.399999619, 103}, + {9.800000191, 118, 1632, 9.399999619, 101}, + {7.400000095, 94, 348, 9.800000191, 117}, + {9.399999619, 119, 370, 10.39999962, 88}, + {11.19999981, 153, 648, 9.899999619, 78}, + {9.100000381, 116, 366, 9.199999809, 102}, + {10.5, 97, 540, 10.30000019, 95}, + {11.89999962, 176, 680, 8.899999619, 80}, + {8.399999619, 75, 345, 9.600000381, 92}, + {5, 134, 525, 10.30000019, 126}, + {9.800000191, 161, 870, 10.39999962, 108}, + {9.800000191, 111, 669, 9.699999809, 77}, + {10.80000019, 114, 452, 9.600000381, 60}, + {10.10000038, 142, 430, 10.69999981, 71}, + {10.89999962, 238, 822, 10.30000019, 86}, + {9.199999809, 78, 190, 10.69999981, 93}, + {8.300000191, 196, 867, 9.600000381, 106}, + {7.300000191, 125, 969, 10.5, 162}, + {9.399999619, 82, 499, 7.699999809, 95}, + {9.399999619, 125, 925, 10.19999981, 91}, + {9.800000191, 129, 353, 9.899999619, 52}, + {3.599999905, 84, 288, 8.399999619, 110}, + {8.399999619, 183, 
718, 10.39999962, 69}, + {10.80000019, 119, 540, 9.199999809, 57}, + {10.10000038, 180, 668, 13, 106}, + {9, 82, 347, 8.800000191, 40}, + {10, 71, 345, 9.199999809, 50}, + {11.30000019, 118, 463, 7.800000191, 35}, + {11.30000019, 121, 728, 8.199999809, 86}, + {12.80000019, 68, 383, 7.400000095, 57}, + {10, 112, 316, 10.39999962, 57}, + {6.699999809, 109, 388, 8.899999619, 94} + }; + + /** Run example. */ + public static void main(String[] args) throws InterruptedException { + System.out.println(); + System.out.println(">>> Linear regression model over sparse distributed matrix API usage example started."); + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + // Create IgniteThread, we must work with SparseDistributedMatrix inside IgniteThread + // because we create ignite cache internally. + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + LinearRegressionSGDTrainerExample.class.getSimpleName(), () -> { + + IgniteCache dataCache = getTestCache(ignite); + + System.out.println(">>> Create new linear regression trainer object."); + LinearRegressionSGDTrainer trainer = new LinearRegressionSGDTrainer<>(new UpdatesStrategy<>( + new RPropUpdateCalculator(), + RPropParameterUpdate::sumLocal, + RPropParameterUpdate::avg + ), 100000, 10, 100, 123L); + + System.out.println(">>> Perform the training to get the model."); + LinearRegressionModel mdl = trainer.fit( + ignite, + dataCache, + (k, v) -> Arrays.copyOfRange(v, 1, v.length), + (k, v) -> v[0] + ); + + System.out.println(">>> Linear regression model: " + mdl); + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + try (QueryCursor> observations = dataCache.query(new ScanQuery<>())) { + for (Cache.Entry observation : observations) { + 
double[] val = observation.getValue(); + double[] inputs = Arrays.copyOfRange(val, 1, val.length); + double groundTruth = val[0]; + + double prediction = mdl.apply(new DenseLocalOnHeapVector(inputs)); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", prediction, groundTruth); + } + } + + System.out.println(">>> ---------------------------------"); + }); + + igniteThread.start(); + + igniteThread.join(); + } + } + + /** + * Fills cache with data and returns it. + * + * @param ignite Ignite instance. + * @return Filled Ignite Cache. + */ + private static IgniteCache getTestCache(Ignite ignite) { + CacheConfiguration cacheConfiguration = new CacheConfiguration<>(); + cacheConfiguration.setName("TEST_" + UUID.randomUUID()); + cacheConfiguration.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache cache = ignite.createCache(cacheConfiguration); + + for (int i = 0; i < data.length; i++) + cache.put(i, data[i]); + + return cache; + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java index f8bf521637010..ce37112978dd5 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/svm/binary/SVMBinaryClassificationExample.java @@ -17,9 +17,6 @@ package org.apache.ignite.examples.ml.svm.binary; -import java.util.Arrays; -import java.util.UUID; -import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; @@ -27,12 +24,15 @@ import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.ScanQuery; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; import 
org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationModel; import org.apache.ignite.ml.svm.SVMLinearBinaryClassificationTrainer; import org.apache.ignite.thread.IgniteThread; +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + /** * Run SVM binary-class classification model over distributed dataset. * @@ -54,7 +54,8 @@ public static void main(String[] args) throws InterruptedException { SVMLinearBinaryClassificationTrainer trainer = new SVMLinearBinaryClassificationTrainer(); SVMLinearBinaryClassificationModel mdl = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), + ignite, + dataCache, (k, v) -> Arrays.copyOfRange(v, 1, v.length), (k, v) -> v[0] ); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java index f8281e489f69d..4054201ee6b96 100644 --- a/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/ml/svm/multiclass/SVMMultiClassClassificationExample.java @@ -17,9 +17,6 @@ package org.apache.ignite.examples.ml.svm.multiclass; -import java.util.Arrays; -import java.util.UUID; -import javax.cache.Cache; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCache; import org.apache.ignite.Ignition; @@ -27,14 +24,17 @@ import org.apache.ignite.cache.query.QueryCursor; import org.apache.ignite.cache.query.ScanQuery; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.ml.dataset.impl.cache.CacheBasedDatasetBuilder; +import org.apache.ignite.ml.math.functions.IgniteBiFunction; import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; -import org.apache.ignite.ml.preprocessing.normalization.NormalizationPreprocessor; 
import org.apache.ignite.ml.preprocessing.normalization.NormalizationTrainer; import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationModel; import org.apache.ignite.ml.svm.SVMLinearMultiClassClassificationTrainer; import org.apache.ignite.thread.IgniteThread; +import javax.cache.Cache; +import java.util.Arrays; +import java.util.UUID; + /** * Run SVM multi-class classification trainer over distributed dataset to build two models: * one with normalization and one without normalization. @@ -57,7 +57,8 @@ public static void main(String[] args) throws InterruptedException { SVMLinearMultiClassClassificationTrainer trainer = new SVMLinearMultiClassClassificationTrainer(); SVMLinearMultiClassClassificationModel mdl = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), + ignite, + dataCache, (k, v) -> Arrays.copyOfRange(v, 1, v.length), (k, v) -> v[0] ); @@ -67,14 +68,15 @@ public static void main(String[] args) throws InterruptedException { NormalizationTrainer normalizationTrainer = new NormalizationTrainer<>(); - NormalizationPreprocessor preprocessor = normalizationTrainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), - (k, v) -> Arrays.copyOfRange(v, 1, v.length), - 5 + IgniteBiFunction preprocessor = normalizationTrainer.fit( + ignite, + dataCache, + (k, v) -> Arrays.copyOfRange(v, 1, v.length) ); SVMLinearMultiClassClassificationModel mdlWithNormalization = trainer.fit( - new CacheBasedDatasetBuilder<>(ignite, dataCache), + ignite, + dataCache, preprocessor, (k, v) -> v[0] ); diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java new file mode 100644 index 0000000000000..1ecf460148ddb --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeClassificationTrainerExample.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.ml.tree; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.tree.DecisionTreeClassificationTrainer; +import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.thread.IgniteThread; + +import java.util.Random; + +/** + * Example of using distributed {@link DecisionTreeClassificationTrainer}. + */ +public class DecisionTreeClassificationTrainerExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String... args) throws InterruptedException { + System.out.println(">>> Decision tree classification trainer example started."); + + // Start ignite grid. 
+ try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + DecisionTreeClassificationTrainerExample.class.getSimpleName(), () -> { + + // Create cache with training data. + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache trainingSet = ignite.createCache(trainingSetCfg); + + Random rnd = new Random(0); + + // Fill training data. + for (int i = 0; i < 1000; i++) + trainingSet.put(i, generatePoint(rnd)); + + // Create classification trainer. + DecisionTreeClassificationTrainer trainer = new DecisionTreeClassificationTrainer(4, 0); + + // Train decision tree model. + DecisionTreeNode mdl = trainer.fit( + ignite, + trainingSet, + (k, v) -> new double[]{v.x, v.y}, + (k, v) -> v.lb + ); + + // Calculate score. + int correctPredictions = 0; + for (int i = 0; i < 1000; i++) { + LabeledPoint pnt = generatePoint(rnd); + + double prediction = mdl.apply(new double[]{pnt.x, pnt.y}); + + if (prediction == pnt.lb) + correctPredictions++; + } + + System.out.println(">>> Accuracy: " + correctPredictions / 10.0 + "%"); + + System.out.println(">>> Decision tree classification trainer example completed."); + }); + + igniteThread.start(); + + igniteThread.join(); + } + } + + /** + * Generate point with {@code x} in (-0.5, 0.5) and {@code y} in the same interval. If {@code x * y > 0} then label + * is 1, otherwise 0. + * + * @param rnd Random. + * @return Point with label. + */ + private static LabeledPoint generatePoint(Random rnd) { + + double x = rnd.nextDouble() - 0.5; + double y = rnd.nextDouble() - 0.5; + + return new LabeledPoint(x, y, x * y > 0 ? 1 : 0); + } + + /** Point data class. */ + private static class Point { + /** X coordinate. 
*/ + final double x; + + /** Y coordinate. */ + final double y; + + /** + * Constructs a new instance of point. + * + * @param x X coordinate. + * @param y Y coordinate. + */ + Point(double x, double y) { + this.x = x; + this.y = y; + } + } + + /** Labeled point data class. */ + private static class LabeledPoint extends Point { + /** Point label. */ + final double lb; + + /** + * Constructs a new instance of labeled point data. + * + * @param x X coordinate. + * @param y Y coordinate. + * @param lb Point label. + */ + LabeledPoint(double x, double y, double lb) { + super(x, y); + this.lb = lb; + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java new file mode 100644 index 0000000000000..19b15f3bbf3ca --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/DecisionTreeRegressionTrainerExample.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.ml.tree; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.ml.tree.DecisionTreeNode; +import org.apache.ignite.ml.tree.DecisionTreeRegressionTrainer; +import org.apache.ignite.thread.IgniteThread; + +/** + * Example of using distributed {@link DecisionTreeRegressionTrainer}. + */ +public class DecisionTreeRegressionTrainerExample { + /** + * Executes example. + * + * @param args Command line arguments, none required. + */ + public static void main(String... args) throws InterruptedException { + System.out.println(">>> Decision tree regression trainer example started."); + + // Start ignite grid. + try (Ignite ignite = Ignition.start("examples/config/example-ignite.xml")) { + System.out.println(">>> Ignite grid started."); + + IgniteThread igniteThread = new IgniteThread(ignite.configuration().getIgniteInstanceName(), + DecisionTreeRegressionTrainerExample.class.getSimpleName(), () -> { + + // Create cache with training data. + CacheConfiguration trainingSetCfg = new CacheConfiguration<>(); + trainingSetCfg.setName("TRAINING_SET"); + trainingSetCfg.setAffinity(new RendezvousAffinityFunction(false, 10)); + + IgniteCache trainingSet = ignite.createCache(trainingSetCfg); + + // Fill training data. + generatePoints(trainingSet); + + // Create regression trainer. + DecisionTreeRegressionTrainer trainer = new DecisionTreeRegressionTrainer(10, 0); + + // Train decision tree model. 
+ DecisionTreeNode mdl = trainer.fit( + ignite, + trainingSet, + (k, v) -> new double[] {v.x}, + (k, v) -> v.y + ); + + System.out.println(">>> Linear regression model: " + mdl); + + System.out.println(">>> ---------------------------------"); + System.out.println(">>> | Prediction\t| Ground Truth\t|"); + System.out.println(">>> ---------------------------------"); + + // Calculate score. + for (int x = 0; x < 10; x++) { + double predicted = mdl.apply(new double[] {x}); + + System.out.printf(">>> | %.4f\t\t| %.4f\t\t|\n", predicted, Math.sin(x)); + } + + System.out.println(">>> ---------------------------------"); + + System.out.println(">>> Decision tree regression trainer example completed."); + }); + + igniteThread.start(); + + igniteThread.join(); + } + } + + /** + * Generates {@code sin(x)} on interval [0, 10) and loads into the specified cache. + */ + private static void generatePoints(IgniteCache trainingSet) { + for (int i = 0; i < 1000; i++) { + double x = i / 100.0; + double y = Math.sin(x); + + trainingSet.put(i, new Point(x, y)); + } + } + + /** Point data class. */ + private static class Point { + /** X coordinate. */ + final double x; + + /** Y coordinate. */ + final double y; + + /** + * Constructs a new instance of point. + * + * @param x X coordinate. + * @param y Y coordinate. + */ + Point(double x, double y) { + this.x = x; + this.y = y; + } + } +} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/tree/package-info.java b/examples/src/main/java/org/apache/ignite/examples/ml/tree/package-info.java new file mode 100644 index 0000000000000..d8d9de60a2169 --- /dev/null +++ b/examples/src/main/java/org/apache/ignite/examples/ml/tree/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * + * Decision trees examples. + */ +package org.apache.ignite.examples.ml.tree; diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/trees/DecisionTreesExample.java b/examples/src/main/java/org/apache/ignite/examples/ml/trees/DecisionTreesExample.java deleted file mode 100644 index b1b2c421a9f5f..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/trees/DecisionTreesExample.java +++ /dev/null @@ -1,354 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.examples.ml.trees; - -import java.io.FileInputStream; -import java.io.FileOutputStream; -import java.io.IOException; -import java.net.URL; -import java.nio.channels.Channels; -import java.nio.channels.ReadableByteChannel; -import java.util.Collection; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import java.util.Random; -import java.util.Scanner; -import java.util.function.Function; -import java.util.stream.Collectors; -import java.util.stream.Stream; -import java.util.zip.GZIPInputStream; -import org.apache.commons.cli.BasicParser; -import org.apache.commons.cli.CommandLine; -import org.apache.commons.cli.CommandLineParser; -import org.apache.commons.cli.Option; -import org.apache.commons.cli.OptionBuilder; -import org.apache.commons.cli.Options; -import org.apache.commons.cli.ParseException; -import org.apache.ignite.Ignite; -import org.apache.ignite.IgniteCache; -import org.apache.ignite.IgniteDataStreamer; -import org.apache.ignite.Ignition; -import org.apache.ignite.cache.CacheWriteSynchronizationMode; -import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.examples.ExampleNodeStartup; -import org.apache.ignite.examples.ml.MLExamplesCommonArgs; -import org.apache.ignite.internal.util.IgniteUtils; -import org.apache.ignite.lang.IgniteBiTuple; -import org.apache.ignite.ml.Model; -import org.apache.ignite.ml.estimators.Estimators; -import org.apache.ignite.ml.math.Vector; -import org.apache.ignite.ml.math.functions.IgniteTriFunction; -import org.apache.ignite.ml.math.impls.vector.DenseLocalOnHeapVector; -import org.apache.ignite.ml.trees.models.DecisionTreeModel; -import org.apache.ignite.ml.trees.trainers.columnbased.BiIndex; -import org.apache.ignite.ml.trees.trainers.columnbased.BiIndexedCacheColumnDecisionTreeTrainerInput; -import org.apache.ignite.ml.trees.trainers.columnbased.ColumnDecisionTreeTrainer; -import 
org.apache.ignite.ml.trees.trainers.columnbased.contsplitcalcs.ContinuousSplitCalculators; -import org.apache.ignite.ml.trees.trainers.columnbased.contsplitcalcs.GiniSplitCalculator; -import org.apache.ignite.ml.trees.trainers.columnbased.regcalcs.RegionCalculators; -import org.apache.ignite.ml.util.MnistUtils; -import org.jetbrains.annotations.NotNull; - -/** - *

- * Example of usage of decision trees algorithm for MNIST dataset - * (it can be found here: http://yann.lecun.com/exdb/mnist/).

- *

- * Remote nodes should always be started with special configuration file which - * enables P2P class loading: {@code 'ignite.{sh|bat} examples/config/example-ignite.xml'}.

- *

- * Alternatively you can run {@link ExampleNodeStartup} in another JVM which will start node - * with {@code examples/config/example-ignite.xml} configuration.

- *

- * It is recommended to start at least one node prior to launching this example if you intend - * to run it with default memory settings.

- *

- * This example should be run with program arguments, for example - * -cfg examples/config/example-ignite.xml.

- *

- * -cfg specifies path to a config path.

- */ -public class DecisionTreesExample { - /** Name of parameter specifying path of Ignite config. */ - private static final String CONFIG = "cfg"; - - /** Default config path. */ - private static final String DEFAULT_CONFIG = "examples/config/example-ignite.xml"; - - /** - * Folder in which MNIST dataset is expected. - */ - private static String MNIST_DIR = "examples/src/main/resources/"; - - /** - * Key for MNIST training images. - */ - private static String MNIST_TRAIN_IMAGES = "train_images"; - - /** - * Key for MNIST training labels. - */ - private static String MNIST_TRAIN_LABELS = "train_labels"; - - /** - * Key for MNIST test images. - */ - private static String MNIST_TEST_IMAGES = "test_images"; - - /** - * Key for MNIST test labels. - */ - private static String MNIST_TEST_LABELS = "test_labels"; - - /** - * Launches example. - * - * @param args Program arguments. - */ - public static void main(String[] args) throws IOException { - System.out.println(">>> Decision trees example started."); - - String igniteCfgPath; - - CommandLineParser parser = new BasicParser(); - - String trainingImagesPath; - String trainingLabelsPath; - - String testImagesPath; - String testLabelsPath; - - Map mnistPaths = new HashMap<>(); - - mnistPaths.put(MNIST_TRAIN_IMAGES, "train-images-idx3-ubyte"); - mnistPaths.put(MNIST_TRAIN_LABELS, "train-labels-idx1-ubyte"); - mnistPaths.put(MNIST_TEST_IMAGES, "t10k-images-idx3-ubyte"); - mnistPaths.put(MNIST_TEST_LABELS, "t10k-labels-idx1-ubyte"); - - try { - // Parse the command line arguments. 
- CommandLine line = parser.parse(buildOptions(), args); - - if (line.hasOption(MLExamplesCommonArgs.UNATTENDED)) { - System.out.println(">>> Skipped example execution because 'unattended' mode is used."); - System.out.println(">>> Decision trees example finished."); - return; - } - - igniteCfgPath = line.getOptionValue(CONFIG, DEFAULT_CONFIG); - } - catch (ParseException e) { - e.printStackTrace(); - return; - } - - if (!getMNIST(mnistPaths.values())) { - System.out.println(">>> You should have MNIST dataset in " + MNIST_DIR + " to run this example."); - return; - } - - trainingImagesPath = Objects.requireNonNull(IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + - mnistPaths.get(MNIST_TRAIN_IMAGES))).getPath(); - trainingLabelsPath = Objects.requireNonNull(IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + - mnistPaths.get(MNIST_TRAIN_LABELS))).getPath(); - testImagesPath = Objects.requireNonNull(IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + - mnistPaths.get(MNIST_TEST_IMAGES))).getPath(); - testLabelsPath = Objects.requireNonNull(IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + - mnistPaths.get(MNIST_TEST_LABELS))).getPath(); - - try (Ignite ignite = Ignition.start(igniteCfgPath)) { - IgniteUtils.setCurrentIgniteName(ignite.configuration().getIgniteInstanceName()); - - int ptsCnt = 60000; - int featCnt = 28 * 28; - - Stream trainingMnistStream = MnistUtils.mnistAsStream(trainingImagesPath, trainingLabelsPath, - new Random(123L), ptsCnt); - - Stream testMnistStream = MnistUtils.mnistAsStream(testImagesPath, testLabelsPath, - new Random(123L), 10_000); - - IgniteCache cache = createBiIndexedCache(ignite); - - loadVectorsIntoBiIndexedCache(cache.getName(), trainingMnistStream.iterator(), featCnt + 1, ignite); - - ColumnDecisionTreeTrainer trainer = new ColumnDecisionTreeTrainer<>(10, - ContinuousSplitCalculators.GINI.apply(ignite), - RegionCalculators.GINI, - RegionCalculators.MOST_COMMON, - ignite); - - System.out.println(">>> Training started"); - long before = 
System.currentTimeMillis(); - DecisionTreeModel mdl = trainer.train(new BiIndexedCacheColumnDecisionTreeTrainerInput(cache, new HashMap<>(), ptsCnt, featCnt)); - System.out.println(">>> Training finished in " + (System.currentTimeMillis() - before)); - - IgniteTriFunction, Stream>, Function, Double> mse = - Estimators.errorsPercentage(); - - Double accuracy = mse.apply(mdl, testMnistStream.map(v -> - new IgniteBiTuple<>(v.viewPart(0, featCnt), v.getX(featCnt))), Function.identity()); - - System.out.println(">>> Errs percentage: " + accuracy); - } - catch (IOException e) { - e.printStackTrace(); - } - - System.out.println(">>> Decision trees example finished."); - } - - /** - * Get MNIST dataset. Value of predicate 'MNIST dataset is present in expected folder' is returned. - * - * @param mnistFileNames File names of MNIST dataset. - * @return Value of predicate 'MNIST dataset is present in expected folder'. - * @throws IOException In case of file system errors. - */ - private static boolean getMNIST(Collection mnistFileNames) throws IOException { - List missing = mnistFileNames.stream(). - filter(f -> IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + f) == null). - collect(Collectors.toList()); - - if (!missing.isEmpty()) { - System.out.println(">>> You have not fully downloaded MNIST dataset in directory " + MNIST_DIR + - ", do you want it to be downloaded? 
[y]/n"); - Scanner s = new Scanner(System.in); - String str = s.nextLine(); - - if (!str.isEmpty() && !str.toLowerCase().equals("y")) - return false; - } - - for (String s : missing) { - String f = s + ".gz"; - System.out.println(">>> Downloading " + f + "..."); - URL website = new URL("http://yann.lecun.com/exdb/mnistAsStream/" + f); - ReadableByteChannel rbc = Channels.newChannel(website.openStream()); - FileOutputStream fos = new FileOutputStream(MNIST_DIR + "/" + f); - fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); - System.out.println(">>> Done."); - - System.out.println(">>> Unzipping " + f + "..."); - unzip(MNIST_DIR + "/" + f, MNIST_DIR + "/" + s); - - System.out.println(">>> Deleting gzip " + f + ", status: " + - Objects.requireNonNull(IgniteUtils.resolveIgnitePath(MNIST_DIR + "/" + f)).delete()); - - System.out.println(">>> Done."); - } - - return true; - } - - /** - * Unzip file located in {@code input} to {@code output}. - * - * @param input Input file path. - * @param output Output file path. - * @throws IOException In case of file system errors. - */ - private static void unzip(String input, String output) throws IOException { - byte[] buf = new byte[1024]; - - try (GZIPInputStream gis = new GZIPInputStream(new FileInputStream(input)); - FileOutputStream out = new FileOutputStream(output)) { - int sz; - while ((sz = gis.read(buf)) > 0) - out.write(buf, 0, sz); - } - } - - /** - * Build cli options. 
- */ - @NotNull private static Options buildOptions() { - Options options = new Options(); - - Option cfgOpt = OptionBuilder - .withArgName(CONFIG) - .withLongOpt(CONFIG) - .hasArg() - .withDescription("Path to the config.") - .isRequired(false).create(); - - Option unattended = OptionBuilder - .withArgName(MLExamplesCommonArgs.UNATTENDED) - .withLongOpt(MLExamplesCommonArgs.UNATTENDED) - .withDescription("Is example run unattended.") - .isRequired(false).create(); - - options.addOption(cfgOpt); - options.addOption(unattended); - - return options; - } - - /** - * Creates cache where data for training is stored. - * - * @param ignite Ignite instance. - * @return cache where data for training is stored. - */ - private static IgniteCache createBiIndexedCache(Ignite ignite) { - CacheConfiguration cfg = new CacheConfiguration<>(); - - // Write to primary. - cfg.setWriteSynchronizationMode(CacheWriteSynchronizationMode.PRIMARY_SYNC); - - // No copying of values. - cfg.setCopyOnRead(false); - - cfg.setName("TMP_BI_INDEXED_CACHE"); - - return ignite.getOrCreateCache(cfg); - } - - /** - * Loads vectors into cache. - * - * @param cacheName Name of cache. - * @param vectorsIter Iterator over vectors to load. - * @param vectorSize Size of vector. - * @param ignite Ignite instance. 
- */ - private static void loadVectorsIntoBiIndexedCache(String cacheName, Iterator vectorsIter, - int vectorSize, Ignite ignite) { - try (IgniteDataStreamer streamer = - ignite.dataStreamer(cacheName)) { - int sampleIdx = 0; - - streamer.perNodeBufferSize(10000); - - while (vectorsIter.hasNext()) { - org.apache.ignite.ml.math.Vector next = vectorsIter.next(); - - for (int i = 0; i < vectorSize; i++) - streamer.addData(new BiIndex(sampleIdx, i), next.getX(i)); - - sampleIdx++; - - if (sampleIdx % 1000 == 0) - System.out.println(">>> Loaded " + sampleIdx + " vectors."); - } - } - } -} diff --git a/examples/src/main/java/org/apache/ignite/examples/ml/trees/package-info.java b/examples/src/main/java/org/apache/ignite/examples/ml/trees/package-info.java deleted file mode 100644 index d944f60570b0b..0000000000000 --- a/examples/src/main/java/org/apache/ignite/examples/ml/trees/package-info.java +++ /dev/null @@ -1,22 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * - * Decision trees examples. 
- */ -package org.apache.ignite.examples.ml.trees; diff --git a/examples/src/main/java/org/apache/ignite/examples/sql/SqlJdbcCopyExample.java b/examples/src/main/java/org/apache/ignite/examples/sql/SqlJdbcCopyExample.java index 1271b3935ff1f..394c3b0425dbb 100644 --- a/examples/src/main/java/org/apache/ignite/examples/sql/SqlJdbcCopyExample.java +++ b/examples/src/main/java/org/apache/ignite/examples/sql/SqlJdbcCopyExample.java @@ -58,8 +58,8 @@ public static void main(String[] args) throws Exception { print("Created database objects."); // Load data from CSV file. - executeCommand(conn, "COPY FROM \"" + - IgniteUtils.resolveIgnitePath("examples/src/main/resources/sql/city.csv") + "\" " + + executeCommand(conn, "COPY FROM '" + + IgniteUtils.resolveIgnitePath("examples/src/main/resources/sql/city.csv") + "' " + "INTO City (ID, Name, CountryCode, District, Population) FORMAT CSV"); // Read data. diff --git a/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteCatalogExample.java b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteCatalogExample.java new file mode 100644 index 0000000000000..c9313f6792b31 --- /dev/null +++ b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteCatalogExample.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.examples.spark; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.spark.sql.AnalysisException; +import org.apache.spark.sql.Dataset; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.ignite.IgniteSparkSession; + +import static org.apache.ignite.internal.util.typedef.X.println; + +/** + * + */ +public class JavaIgniteCatalogExample { + /** + * Ignite config file. + */ + private static final String CONFIG = "examples/config/example-ignite.xml"; + + /** + * Test cache name. + */ + private static final String CACHE_NAME = "testCache"; + + /** */ + public static void main(String args[]) throws AnalysisException { + + setupServerAndData(); + + //Creating Ignite-specific implementation of Spark session. + IgniteSparkSession igniteSession = IgniteSparkSession.builder() + .appName("Spark Ignite catalog example") + .master("local") + .config("spark.executor.instances", "2") + .igniteConfig(CONFIG) + .getOrCreate(); + + //Adjust the logger to exclude the logs of no interest. + Logger.getRootLogger().setLevel(Level.ERROR); + Logger.getLogger("org.apache.ignite").setLevel(Level.INFO); + + System.out.println("List of available tables:"); + + //Showing existing tables. + igniteSession.catalog().listTables().show(); + + System.out.println("PERSON table description:"); + + //Showing `person` schema. + igniteSession.catalog().listColumns("person").show(); + + System.out.println("CITY table description:"); + + //Showing `city` schema. 
+ igniteSession.catalog().listColumns("city").show(); + + println("Querying all persons from city with ID=2."); + + //Selecting data through Spark SQL engine. + Dataset df = igniteSession.sql("SELECT * FROM person WHERE CITY_ID = 2"); + + System.out.println("Result schema:"); + + df.printSchema(); + + System.out.println("Result content:"); + + df.show(); + + System.out.println("Querying all persons living in Denver."); + + //Selecting data through Spark SQL engine. + Dataset df2 = igniteSession.sql("SELECT * FROM person p JOIN city c ON c.ID = p.CITY_ID WHERE c.NAME = 'Denver'"); + + System.out.println("Result schema:"); + + df2.printSchema(); + + System.out.println("Result content:"); + + df2.show(); + + Ignition.stop(false); + } + + /** */ + private static void setupServerAndData() { + //Starting Ignite. + Ignite ignite = Ignition.start(CONFIG); + + //Creating cache. + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME).setSqlSchema("PUBLIC"); + + IgniteCache cache = ignite.getOrCreateCache(ccfg); + + //Create tables. + cache.query(new SqlFieldsQuery( + "CREATE TABLE city (id LONG PRIMARY KEY, name VARCHAR) WITH \"template=replicated\"")).getAll(); + + cache.query(new SqlFieldsQuery( + "CREATE TABLE person (id LONG, name VARCHAR, city_id LONG, PRIMARY KEY (id, city_id)) " + + "WITH \"backups=1, affinityKey=city_id\"")).getAll(); + + cache.query(new SqlFieldsQuery("CREATE INDEX on Person (city_id)")).getAll(); + + //Inserting some data into table. + SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO city (id, name) VALUES (?, ?)"); + + cache.query(qry.setArgs(1L, "Forest Hill")).getAll(); + cache.query(qry.setArgs(2L, "Denver")).getAll(); + cache.query(qry.setArgs(3L, "St. 
Petersburg")).getAll(); + + qry = new SqlFieldsQuery("INSERT INTO person (id, name, city_id) values (?, ?, ?)"); + + cache.query(qry.setArgs(1L, "John Doe", 3L)).getAll(); + cache.query(qry.setArgs(2L, "Jane Roe", 2L)).getAll(); + cache.query(qry.setArgs(3L, "Mary Major", 1L)).getAll(); + cache.query(qry.setArgs(4L, "Richard Miles", 2L)).getAll(); + } +} diff --git a/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameExample.java b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameExample.java new file mode 100644 index 0000000000000..20bcf83e2d4d4 --- /dev/null +++ b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameExample.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.spark; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.spark.IgniteDataFrameSettings; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.spark.sql.Dataset; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.SparkSession; + +import static org.apache.spark.sql.functions.col; + +/** + * + */ +public class JavaIgniteDataFrameExample { + /** + * Ignite config file. + */ + private static final String CONFIG = "examples/config/example-ignite.xml"; + + /** + * Test cache name. + */ + private static final String CACHE_NAME = "testCache"; + + /** */ + public static void main(String args[]) { + + setupServerAndData(); + + //Creating spark session. + SparkSession spark = SparkSession + .builder() + .appName("JavaIgniteDataFrameExample") + .master("local") + .config("spark.executor.instances", "2") + .getOrCreate(); + + // Adjust the logger to exclude the logs of no interest. + Logger.getRootLogger().setLevel(Level.ERROR); + Logger.getLogger("org.apache.ignite").setLevel(Level.INFO); + + // Executing examples. + + sparkDSLExample(spark); + + nativeSparkSqlExample(spark); + + Ignition.stop(false); + } + + /** */ + private static void sparkDSLExample(SparkSession spark) { + System.out.println("Querying using Spark DSL."); + + Dataset igniteDF = spark.read() + .format(IgniteDataFrameSettings.FORMAT_IGNITE()) //Data source type. + .option(IgniteDataFrameSettings.OPTION_TABLE(), "person") //Table to read. + .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE(), CONFIG) //Ignite config. + .load() + .filter(col("id").geq(2)) //Filter clause. + .filter(col("name").like("%M%")); //Another filter clause. 
+ + System.out.println("Data frame schema:"); + + igniteDF.printSchema(); //Printing query schema to console. + + System.out.println("Data frame content:"); + + igniteDF.show(); //Printing query results to console. + } + + /** */ + private static void nativeSparkSqlExample(SparkSession spark) { + System.out.println("Querying using Spark SQL."); + + Dataset df = spark.read() + .format(IgniteDataFrameSettings.FORMAT_IGNITE()) //Data source type. + .option(IgniteDataFrameSettings.OPTION_TABLE(), "person") //Table to read. + .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE(), CONFIG) //Ignite config. + .load(); + + //Registering DataFrame as Spark view. + df.createOrReplaceTempView("person"); + + //Selecting data from Ignite through Spark SQL Engine. + Dataset igniteDF = spark.sql("SELECT * FROM person WHERE id >= 2 AND name = 'Mary Major'"); + + System.out.println("Result schema:"); + + igniteDF.printSchema(); //Printing query schema to console. + + System.out.println("Result content:"); + + igniteDF.show(); //Printing query results to console. + } + + /** */ + private static void setupServerAndData() { + //Starting Ignite. + Ignite ignite = Ignition.start(CONFIG); + + //Creating first test cache. + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME).setSqlSchema("PUBLIC"); + + IgniteCache cache = ignite.getOrCreateCache(ccfg); + + //Creating SQL tables. + cache.query(new SqlFieldsQuery( + "CREATE TABLE city (id LONG PRIMARY KEY, name VARCHAR) WITH \"template=replicated\"")).getAll(); + + cache.query(new SqlFieldsQuery( + "CREATE TABLE person (id LONG, name VARCHAR, city_id LONG, PRIMARY KEY (id, city_id)) " + + "WITH \"backups=1, affinity_key=city_id\"")).getAll(); + + cache.query(new SqlFieldsQuery("CREATE INDEX on Person (city_id)")).getAll(); + + SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO city (id, name) VALUES (?, ?)"); + + //Inserting some data to tables. 
+ cache.query(qry.setArgs(1L, "Forest Hill")).getAll(); + cache.query(qry.setArgs(2L, "Denver")).getAll(); + cache.query(qry.setArgs(3L, "St. Petersburg")).getAll(); + + qry = new SqlFieldsQuery("INSERT INTO person (id, name, city_id) values (?, ?, ?)"); + + cache.query(qry.setArgs(1L, "John Doe", 3L)).getAll(); + cache.query(qry.setArgs(2L, "Jane Roe", 2L)).getAll(); + cache.query(qry.setArgs(3L, "Mary Major", 1L)).getAll(); + cache.query(qry.setArgs(4L, "Richard Miles", 2L)).getAll(); + } +} diff --git a/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameWriteExample.java b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameWriteExample.java new file mode 100644 index 0000000000000..6fc1393dc0812 --- /dev/null +++ b/examples/src/main/spark/org/apache/ignite/examples/spark/JavaIgniteDataFrameWriteExample.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.examples.spark; + +import org.apache.ignite.Ignite; +import org.apache.ignite.IgniteCache; +import org.apache.ignite.Ignition; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.spark.IgniteDataFrameSettings; +import org.apache.log4j.Level; +import org.apache.log4j.Logger; +import org.apache.spark.sql.Dataset; +import org.apache.spark.sql.Row; +import org.apache.spark.sql.SaveMode; +import org.apache.spark.sql.SparkSession; + +import java.util.List; + +import static org.apache.ignite.internal.util.IgniteUtils.resolveIgnitePath; +import static org.apache.spark.sql.functions.col; +import static org.apache.spark.sql.functions.reverse; + +/** + * + */ +public class JavaIgniteDataFrameWriteExample { + /** + * Ignite config file. + */ + private static final String CONFIG = "examples/config/example-ignite.xml"; + + /** + * Test cache name. + */ + private static final String CACHE_NAME = "testCache"; + + /** */ + public static void main(String args[]) { + //Starting Ignite. + Ignite ignite = Ignition.start(CONFIG); + + //Starting Ignite server node. + setupServerAndData(ignite); + + //Creating spark session. + SparkSession spark = SparkSession + .builder() + .appName("Spark Ignite data sources write example") + .master("local") + .config("spark.executor.instances", "2") + .getOrCreate(); + + // Adjust the logger to exclude the logs of no interest. + Logger.getRootLogger().setLevel(Level.ERROR); + Logger.getLogger("org.apache.ignite").setLevel(Level.INFO); + + // Executing examples. 
+ System.out.println("Example of writing json file to Ignite:"); + + writeJSonToIgnite(ignite, spark); + + System.out.println("Example of modifying existing Ignite table data through Data Fram API:"); + + editDataAndSaveToNewTable(ignite, spark); + + Ignition.stop(false); + } + + /** */ + private static void writeJSonToIgnite(Ignite ignite, SparkSession spark) { + //Load content of json file to data frame. + Dataset personsDataFrame = spark.read().json( + resolveIgnitePath("examples/src/main/resources/person.json").getAbsolutePath()); + + System.out.println("Json file content:"); + + //Printing content of json file to console. + personsDataFrame.show(); + + System.out.println("Writing Data Frame to Ignite:"); + + //Writing content of data frame to Ignite. + personsDataFrame.write() + .format(IgniteDataFrameSettings.FORMAT_IGNITE()) + .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE(), CONFIG) + .option(IgniteDataFrameSettings.OPTION_TABLE(), "json_person") + .option(IgniteDataFrameSettings.OPTION_CREATE_TABLE_PRIMARY_KEY_FIELDS(), "id") + .option(IgniteDataFrameSettings.OPTION_CREATE_TABLE_PARAMETERS(), "template=replicated") + .save(); + + System.out.println("Done!"); + + System.out.println("Reading data from Ignite table:"); + + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME); + + IgniteCache cache = ignite.getOrCreateCache(ccfg); + + //Reading saved data from Ignite. + List> data = cache.query(new SqlFieldsQuery("SELECT id, name, department FROM json_person")).getAll(); + + System.out.println(data); + } + + /** */ + private static void editDataAndSaveToNewTable(Ignite ignite, SparkSession spark) { + //Load content of Ignite table to data frame. 
+ Dataset personDataFrame = spark.read() + .format(IgniteDataFrameSettings.FORMAT_IGNITE()) + .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE(), CONFIG) + .option(IgniteDataFrameSettings.OPTION_TABLE(), "person") + .load(); + + System.out.println("Data frame content:"); + + //Printing content of data frame to console. + personDataFrame.show(); + + System.out.println("Modifying Data Frame and write it to Ignite:"); + + personDataFrame + .withColumn("id", col("id").plus(42)) //Edit id column + .withColumn("name", reverse(col("name"))) //Edit name column + .write().format(IgniteDataFrameSettings.FORMAT_IGNITE()) + .option(IgniteDataFrameSettings.OPTION_CONFIG_FILE(), CONFIG) + .option(IgniteDataFrameSettings.OPTION_TABLE(), "new_persons") + .option(IgniteDataFrameSettings.OPTION_CREATE_TABLE_PRIMARY_KEY_FIELDS(), "id, city_id") + .option(IgniteDataFrameSettings.OPTION_CREATE_TABLE_PARAMETERS(), "backups=1") + .mode(SaveMode.Overwrite) //Overwriting entire table. + .save(); + + System.out.println("Done!"); + + System.out.println("Reading data from Ignite table:"); + + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME); + + IgniteCache cache = ignite.getOrCreateCache(ccfg); + + //Reading saved data from Ignite. + List> data = cache.query(new SqlFieldsQuery("SELECT id, name, city_id FROM new_persons")).getAll(); + + System.out.println(data); + } + + /** */ + private static void setupServerAndData(Ignite ignite) { + //Creating first test cache. + CacheConfiguration ccfg = new CacheConfiguration<>(CACHE_NAME).setSqlSchema("PUBLIC"); + + IgniteCache cache = ignite.getOrCreateCache(ccfg); + + //Creating SQL table. + cache.query(new SqlFieldsQuery( + "CREATE TABLE person (id LONG, name VARCHAR, city_id LONG, PRIMARY KEY (id)) " + + "WITH \"backups=1\"")).getAll(); + + cache.query(new SqlFieldsQuery("CREATE INDEX on Person (city_id)")).getAll(); + + //Inserting some data to tables. 
+ SqlFieldsQuery qry = new SqlFieldsQuery("INSERT INTO person (id, name, city_id) values (?, ?, ?)"); + + cache.query(qry.setArgs(1L, "John Doe", 3L)).getAll(); + cache.query(qry.setArgs(2L, "Jane Roe", 2L)).getAll(); + cache.query(qry.setArgs(3L, "Mary Major", 1L)).getAll(); + cache.query(qry.setArgs(4L, "Richard Miles", 2L)).getAll(); + } +} diff --git a/examples/src/test/spark/org/apache/ignite/spark/examples/JavaIgniteDataFrameSelfTest.java b/examples/src/test/spark/org/apache/ignite/spark/examples/JavaIgniteDataFrameSelfTest.java new file mode 100644 index 0000000000000..295814d96383d --- /dev/null +++ b/examples/src/test/spark/org/apache/ignite/spark/examples/JavaIgniteDataFrameSelfTest.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.spark.examples; + +import org.apache.ignite.examples.spark.JavaIgniteCatalogExample; +import org.apache.ignite.examples.spark.JavaIgniteDataFrameExample; +import org.apache.ignite.examples.spark.JavaIgniteDataFrameWriteExample; +import org.apache.ignite.testframework.junits.common.GridAbstractExamplesTest; +import org.junit.Test; + +/** + */ +public class JavaIgniteDataFrameSelfTest extends GridAbstractExamplesTest { + static final String[] EMPTY_ARGS = new String[0]; + + /** + * @throws Exception If failed. + */ + @Test + public void testCatalogExample() throws Exception { + JavaIgniteCatalogExample.main(EMPTY_ARGS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testDataFrameExample() throws Exception { + JavaIgniteDataFrameExample.main(EMPTY_ARGS); + } + + /** + * @throws Exception If failed. + */ + @Test + public void testDataFrameWriteExample() throws Exception { + JavaIgniteDataFrameWriteExample.main(EMPTY_ARGS); + } +} diff --git a/examples/src/test/spark/org/apache/ignite/spark/testsuites/IgniteExamplesSparkSelfTestSuite.java b/examples/src/test/spark/org/apache/ignite/spark/testsuites/IgniteExamplesSparkSelfTestSuite.java index df1a243e3f067..6328ee241a190 100644 --- a/examples/src/test/spark/org/apache/ignite/spark/testsuites/IgniteExamplesSparkSelfTestSuite.java +++ b/examples/src/test/spark/org/apache/ignite/spark/testsuites/IgniteExamplesSparkSelfTestSuite.java @@ -19,6 +19,7 @@ import junit.framework.TestSuite; import org.apache.ignite.spark.examples.IgniteDataFrameSelfTest; +import org.apache.ignite.spark.examples.JavaIgniteDataFrameSelfTest; import org.apache.ignite.spark.examples.SharedRDDExampleSelfTest; import org.apache.ignite.testframework.GridTestUtils; @@ -42,6 +43,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(SharedRDDExampleSelfTest.class)); suite.addTest(new TestSuite(IgniteDataFrameSelfTest.class)); + suite.addTest(new 
TestSuite(JavaIgniteDataFrameSelfTest.class)); return suite; } diff --git a/modules/aop/pom.xml b/modules/aop/pom.xml index 3e727d985083a..0665032fd78fa 100644 --- a/modules/aop/pom.xml +++ b/modules/aop/pom.xml @@ -31,7 +31,7 @@ ignite-aop - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/aop/src/test/config/aop/aspectj/META-INF/aop.xml b/modules/aop/src/test/config/aop/aspectj/META-INF/aop.xml index 79417da2ef21b..8741bd18cd57a 100644 --- a/modules/aop/src/test/config/aop/aspectj/META-INF/aop.xml +++ b/modules/aop/src/test/config/aop/aspectj/META-INF/aop.xml @@ -139,7 +139,7 @@ - + @@ -285,7 +285,7 @@ - + diff --git a/modules/apache-license-gen/pom.xml b/modules/apache-license-gen/pom.xml index 31ab20e14eb19..abb4b1caad7e5 100644 --- a/modules/apache-license-gen/pom.xml +++ b/modules/apache-license-gen/pom.xml @@ -31,7 +31,7 @@ org.apache.ignite ignite-apache-license-gen - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/aws/pom.xml b/modules/aws/pom.xml index 8540b84e87c8b..29c245dd31dc8 100644 --- a/modules/aws/pom.xml +++ b/modules/aws/pom.xml @@ -31,7 +31,7 @@ ignite-aws - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org @@ -71,22 +71,25 @@ ${aws.sdk.version}
+ com.fasterxml.jackson.core jackson-core - ${jackson2.version} + ${jackson.version} + com.fasterxml.jackson.core jackson-annotations - ${jackson2.version} + ${jackson.version} + com.fasterxml.jackson.core jackson-databind - ${jackson2.version} + ${jackson.version} diff --git a/modules/benchmarks/pom.xml b/modules/benchmarks/pom.xml index 9e000d46706b7..b39c4baf7b465 100644 --- a/modules/benchmarks/pom.xml +++ b/modules/benchmarks/pom.xml @@ -31,7 +31,7 @@ ignite-benchmarks - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org @@ -51,6 +51,12 @@ ${project.version} + + org.openjdk.jol + jol-core + 0.9 + + org.openjdk.jmh jmh-core @@ -62,6 +68,16 @@ ${jmh.version} provided + + org.mockito + mockito-all + ${mockito.version} + + + com.google.guava + guava + ${guava.version} +
@@ -131,4 +147,4 @@ - \ No newline at end of file + diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/algo/BenchmarkCRC.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/algo/BenchmarkCRC.java new file mode 100644 index 0000000000000..5c922fead0c80 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/algo/BenchmarkCRC.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jmh.algo; + +import org.apache.ignite.internal.processors.cache.persistence.wal.crc.FastCrc; +import org.apache.ignite.internal.processors.cache.persistence.wal.crc.PureJavaCrc32; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; + +import java.nio.ByteBuffer; +import java.util.Random; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.openjdk.jmh.annotations.Mode.AverageTime; +import static org.openjdk.jmh.annotations.Scope.Thread; + +/** + * + */ +@State(Thread) +@OutputTimeUnit(NANOSECONDS) +@BenchmarkMode(AverageTime) +@Fork(value = 1, jvmArgsAppend = {"-XX:+UnlockDiagnosticVMOptions"}) +@Warmup(iterations = 5) +@Measurement(iterations = 5) +public class BenchmarkCRC { + /** */ + static final int SIZE = 1024; + + /** */ + static final int BUF_LEN = 4096; + + /** */ + @State(Thread) + public static class Context { + /** */ + final int[] results = new int[SIZE]; + + /** */ + final ByteBuffer bb = ByteBuffer.allocate(BUF_LEN); + + /** */ + @Setup + public void setup() { + new Random().ints(BUF_LEN, Byte.MIN_VALUE, Byte.MAX_VALUE).forEach(k -> bb.put((byte) k)); + } + } + + /** */ + @Benchmark + public int[] pureJavaCrc32(Context context) { + for (int i = 0; i < SIZE; i++) { + context.bb.rewind(); + + context.results[i] = PureJavaCrc32.calcCrc32(context.bb, BUF_LEN); + } + + return context.results; + } + + /** */ + @Benchmark + public int[] crc32(Context context) { + for (int i = 0; i < SIZE; i++) { + context.bb.rewind(); + + context.results[i] = FastCrc.calcCrc(context.bb, BUF_LEN); + } + + return context.results; + } +} + + diff --git 
a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/collections/SmallHashSetsVsReadOnlyViewBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/collections/SmallHashSetsVsReadOnlyViewBenchmark.java new file mode 100644 index 0000000000000..d1307929e7a3d --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/collections/SmallHashSetsVsReadOnlyViewBenchmark.java @@ -0,0 +1,154 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jmh.collections; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.UUID; +import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark; +import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner; +import org.apache.ignite.internal.benchmarks.model.Node; +import org.apache.ignite.internal.processors.affinity.AffinityAssignment; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.lang.IgniteClosure; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.OutputTimeUnit; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +import static java.util.concurrent.TimeUnit.NANOSECONDS; +import static org.openjdk.jmh.annotations.Mode.Throughput; + +/** + * Comparison of HashMap vs view on List on small sizes. + */ +@State(Scope.Benchmark) +@OutputTimeUnit(NANOSECONDS) +@BenchmarkMode(Throughput) +public class SmallHashSetsVsReadOnlyViewBenchmark extends JmhAbstractBenchmark { + /** */ + private static final int SIZE = AffinityAssignment.IGNITE_AFFINITY_BACKUPS_THRESHOLD; + + /** */ + private static final int PARTS = 8192; + + /** + * + * @param args Args. + * @throws Exception Exception. 
+ */ + public static void main(String[] args) throws Exception { + JmhIdeBenchmarkRunner.create() + .threads(1) + .measurementIterations(20) + .benchmarks(SmallHashSetsVsReadOnlyViewBenchmark.class.getSimpleName()) + .run(); + } + + /** */ + private final Random random = new Random(); + + /** */ + private final List> hashSets = new ArrayList<>(); + + /** */ + private final List> lists = new ArrayList<>(); + + /** */ + private final Node[] nodes = new Node[SIZE]; + + /** */ + @Setup + public void setup() { + for (int i = 0; i < SIZE; i++) + nodes[i] = new Node(UUID.randomUUID()); + + for (int i= 0; i < PARTS; i++) { + Collection hashSet = new HashSet<>(); + + for (int j = 0; j < SIZE; j++) + hashSet.add(nodes[j].getUuid()); + + hashSets.add(hashSet); + + List list = new ArrayList<>(SIZE); + + for (int j = 0; j < SIZE; j++) + list.add(nodes[j]); + + lists.add(list); + } + } + + /** */ + @Benchmark + public boolean hashSetContainsRandom() { + return hashSets.get(random.nextInt(PARTS)) + .contains(nodes[random.nextInt(SIZE)].getUuid()); + } + + /** */ + @Benchmark + public boolean readOnlyViewContainsRandom() { + return F.viewReadOnly( + lists.get(random.nextInt(PARTS)), + (IgniteClosure)Node::getUuid + ).contains(nodes[random.nextInt(SIZE)].getUuid()); + } + + /** */ + @Benchmark + public boolean hashSetIteratorRandom() { + UUID randomUuid = nodes[random.nextInt(SIZE)].getUuid(); + + Collection col = hashSets.get(random.nextInt(PARTS)); + + boolean contains = false; + + for(UUID uuid : col) + if (randomUuid.equals(uuid)) + contains = true; + + return contains; + } + + /** */ + @Benchmark + public boolean readOnlyViewIteratorRandom() { + UUID randomUuid = nodes[random.nextInt(SIZE)].getUuid(); + + Collection col = F.viewReadOnly( + lists.get(random.nextInt(PARTS)), + (IgniteClosure)Node::getUuid + ); + + boolean contains = false; + + for(UUID uuid : col) + if (randomUuid.equals(uuid)) + contains = true; + + return contains; + } +} + diff --git 
a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/JmhPageLockTrackerBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/JmhPageLockTrackerBenchmark.java new file mode 100644 index 0000000000000..766218b0cf0fa --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/JmhPageLockTrackerBenchmark.java @@ -0,0 +1,144 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jmh.diagnostic.pagelocktracker; + +import org.apache.ignite.internal.benchmarks.jmh.diagnostic.pagelocktracker.stack.LockTrackerNoBarrier; +import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory; +import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTracker; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Fork; +import org.openjdk.jmh.annotations.Measurement; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Param; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Warmup; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.HEAP_LOG; +import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.HEAP_STACK; +import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.OFF_HEAP_LOG; +import static org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.LockTrackerFactory.OFF_HEAP_STACK; + +/** + * Benchmark PageLockTracker (factory LockTrackerFactory) + */ +public class JmhPageLockTrackerBenchmark { + /** + * @param args Params. 
+ */ + public static void main(String[] args) throws Exception { + Options opt = new OptionsBuilder() + .include(JmhPageLockTrackerBenchmark.class.getSimpleName()) + .build(); + + new Runner(opt).run(); + } + + /** */ + @State(Scope.Thread) + public static class ThreadLocalState { + PageLockListener pl; + + @Param({"2", "4", "8", "16"}) + int stackSize; + + @Param({ + "HeapArrayLockStack", + "HeapArrayLockLog", + "OffHeapLockStack", + "OffHeapLockLog" + }) + String type; + + @Param({"true", "false"}) + boolean barrier; + + int StructureId = 123; + + @Setup + public void doSetup() { + pl = create(Thread.currentThread().getName(), type, barrier); + } + } + + /** + * Mesure cost for (beforelock -> lock -> unlock) operation. + */ + @Benchmark + @BenchmarkMode(Mode.Throughput) + @Fork(1) + @Warmup(iterations = 10) + @Measurement(iterations = 10) + //@OutputTimeUnit(TimeUnit.MICROSECONDS) + public void lockUnlock(ThreadLocalState localState) { + PageLockListener pl = localState.pl; + + for (int i = 0; i < localState.stackSize; i++) { + int pageId = i + 1; + + pl.onBeforeReadLock(localState.StructureId, pageId, pageId); + + pl.onReadLock(localState.StructureId, pageId, pageId, pageId); + } + + for (int i = localState.stackSize; i > 0; i--) { + int pageId = i; + + pl.onReadUnlock(localState.StructureId, pageId, pageId, pageId); + } + } + + /** + * Factory method. + * + * @param name Lock tracer name. + * @param type Lock tracer type. + * @param barrier If {@code True} use real implementation, + * if {@code False} use implementation with safety dump barrier. + * @return Page lock tracker as PageLockListener. 
+ */ + private static PageLockListener create(String name, String type, boolean barrier) { + PageLockTracker tracker; + + switch (type) { + case "HeapArrayLockStack": + tracker = LockTrackerFactory.create(HEAP_STACK, name); + break; + case "HeapArrayLockLog": + tracker = LockTrackerFactory.create(HEAP_LOG, name); + break; + case "OffHeapLockStack": + tracker = LockTrackerFactory.create(OFF_HEAP_STACK, name); + break; + + case "OffHeapLockLog": + tracker = LockTrackerFactory.create(OFF_HEAP_LOG, name); + break; + default: + throw new IllegalArgumentException("type:" + type); + } + + return barrier ? tracker : new LockTrackerNoBarrier(tracker); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/stack/LockTrackerNoBarrier.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/stack/LockTrackerNoBarrier.java new file mode 100644 index 0000000000000..2d8f78ba5ab61 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/diagnostic/pagelocktracker/stack/LockTrackerNoBarrier.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.benchmarks.jmh.diagnostic.pagelocktracker.stack; + +import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTracker; +import org.apache.ignite.internal.processors.cache.persistence.tree.util.PageLockListener; + +/** + * Local without barrier syncronization on operation. + */ +public class LockTrackerNoBarrier implements PageLockListener { + /** */ + private final PageLockTracker delegate; + + /** */ + public LockTrackerNoBarrier( + PageLockTracker delegate + ) { + this.delegate = delegate; + } + + /** {@inheritDoc} */ + @Override public void onBeforeWriteLock(int cacheId, long pageId, long page) { + delegate.onBeforeWriteLock0(cacheId, pageId, page); + } + + /** {@inheritDoc} */ + @Override public void onWriteLock(int cacheId, long pageId, long page, long pageAddr) { + delegate.onWriteLock0(cacheId, pageId, page, pageAddr); + } + + /** {@inheritDoc} */ + @Override public void onWriteUnlock(int cacheId, long pageId, long page, long pageAddr) { + delegate.onWriteUnlock0(cacheId, pageId, page, pageAddr); + } + + /** {@inheritDoc} */ + @Override public void onBeforeReadLock(int cacheId, long pageId, long page) { + delegate.onBeforeReadLock0(cacheId, pageId, page); + } + + /** {@inheritDoc} */ + @Override public void onReadLock(int cacheId, long pageId, long page, long pageAddr) { + delegate.onReadLock0(cacheId, pageId, page, pageAddr); + } + + /** {@inheritDoc} */ + @Override public void onReadUnlock(int cacheId, long pageId, long page, long pageAddr) { + delegate.onReadUnlock(cacheId, pageId, page, pageAddr); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/misc/GridDhtPartitionsStateValidatorBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/misc/GridDhtPartitionsStateValidatorBenchmark.java new file mode 100644 index 0000000000000..f3bbcb96d6cb9 --- /dev/null +++ 
b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/misc/GridDhtPartitionsStateValidatorBenchmark.java @@ -0,0 +1,185 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jmh.misc; + +import com.google.common.collect.Lists; +import com.google.common.collect.Sets; +import org.apache.ignite.internal.benchmarks.jmh.JmhAbstractBenchmark; +import org.apache.ignite.internal.benchmarks.jmh.runner.JmhIdeBenchmarkRunner; +import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsSingleMessage; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionsStateValidator; +import org.apache.ignite.internal.util.typedef.T2; +import org.jetbrains.annotations.Nullable; +import org.mockito.Matchers; +import 
org.mockito.Mockito; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.stream.IntStream; + +import static org.openjdk.jmh.annotations.Scope.Thread; + +/** */ +@State(Scope.Benchmark) +public class GridDhtPartitionsStateValidatorBenchmark extends JmhAbstractBenchmark { + /** */ + @State(Thread) + public static class Context { + /** */ + private final UUID localNodeId = UUID.randomUUID(); + + /** */ + private GridCacheSharedContext cctxMock; + + /** */ + private GridDhtPartitionTopology topologyMock; + + /** */ + private GridDhtPartitionsStateValidator validator; + + /** */ + private Map messages = new HashMap<>(); + + /** */ + private UUID ignoreNode = UUID.randomUUID(); + + /** */ + private static final int NODES = 3; + + /** */ + private static final int PARTS = 100; + + /** + * @return Partition mock with specified {@code id}, {@code updateCounter} and {@code size}. + */ + private GridDhtLocalPartition partitionMock(int id, long updateCounter, long size) { + GridDhtLocalPartition partitionMock = Mockito.mock(GridDhtLocalPartition.class); + Mockito.when(partitionMock.id()).thenReturn(id); + Mockito.when(partitionMock.updateCounter()).thenReturn(updateCounter); + Mockito.when(partitionMock.fullSize()).thenReturn(size); + Mockito.when(partitionMock.state()).thenReturn(GridDhtPartitionState.OWNING); + return partitionMock; + } + + /** + * @param countersMap Update counters map. + * @param sizesMap Sizes map. + * @return Message with specified {@code countersMap} and {@code sizeMap}. 
+ */ + private GridDhtPartitionsSingleMessage from(@Nullable Map> countersMap, @Nullable Map sizesMap) { + GridDhtPartitionsSingleMessage msg = new GridDhtPartitionsSingleMessage(); + if (countersMap != null) + msg.addPartitionUpdateCounters(0, countersMap); + if (sizesMap != null) + msg.addPartitionSizes(0, sizesMap); + return msg; + } + + /** */ + @Setup + public void setup() { + // Prepare mocks. + cctxMock = Mockito.mock(GridCacheSharedContext.class); + Mockito.when(cctxMock.localNodeId()).thenReturn(localNodeId); + + topologyMock = Mockito.mock(GridDhtPartitionTopology.class); + Mockito.when(topologyMock.partitionState(Matchers.any(), Matchers.anyInt())).thenReturn(GridDhtPartitionState.OWNING); + Mockito.when(topologyMock.groupId()).thenReturn(0); + + Mockito.when(topologyMock.partitions()).thenReturn(PARTS); + + List localPartitions = Lists.newArrayList(); + + Map> updateCountersMap = new HashMap<>(); + + Map cacheSizesMap = new HashMap<>(); + + IntStream.range(0, PARTS).forEach(k -> { localPartitions.add(partitionMock(k, k + 1, k + 1)); + long us = k > 20 && k <= 30 ? 0 :k + 2L; + updateCountersMap.put(k, new T2<>(k + 2L, us)); + cacheSizesMap.put(k, us); }); + + Mockito.when(topologyMock.localPartitions()).thenReturn(localPartitions); + Mockito.when(topologyMock.currentLocalPartitions()).thenReturn(localPartitions); + + // Form single messages map. 
+ Map messages = new HashMap<>(); + + for (int n = 0; n < NODES; ++n) { + UUID remoteNode = UUID.randomUUID(); + + messages.put(remoteNode, from(updateCountersMap, cacheSizesMap)); + } + + messages.put(ignoreNode, from(updateCountersMap, cacheSizesMap)); + + validator = new GridDhtPartitionsStateValidator(cctxMock); + } + } + + /** */ + @Benchmark + public void testValidatePartitionsUpdateCounters(Context context) { + context.validator.validatePartitionsUpdateCounters(context.topologyMock, + context.messages, Sets.newHashSet(context.ignoreNode)); + } + + /** */ + @Benchmark + public void testValidatePartitionsSizes(Context context) { + context.validator.validatePartitionsSizes(context.topologyMock, context + .messages, Sets.newHashSet(context.ignoreNode)); + } + + /** + * Run benchmarks. + * + * @param args Arguments. + * @throws Exception If failed. + */ + public static void main(String[] args) throws Exception { + run(1); + } + + /** + * Run benchmark. + * + * @param threads Amount of threads. + * @throws Exception If failed. 
+ */ + private static void run(int threads) throws Exception { + JmhIdeBenchmarkRunner.create() + .forks(1) + .threads(threads) + .warmupIterations(5) + .measurementIterations(10) + .benchmarks(GridDhtPartitionsStateValidatorBenchmark.class.getSimpleName()) + .jvmArguments("-XX:+UseG1GC", "-Xms4g", "-Xmx4g") + .run(); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java index cef00eedd11c5..dcf8dc8e853d8 100644 --- a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jmh/tree/BPlusTreeBenchmark.java @@ -137,7 +137,7 @@ public void setup() throws Exception { public void tearDown() throws Exception { tree.destroy(); - pageMem.stop(); + pageMem.stop(true); } /** @@ -175,8 +175,20 @@ protected static class TestTree extends BPlusTree { */ TestTree(ReuseList reuseList, int cacheId, PageMemory pageMem, long metaPageId) throws IgniteCheckedException { - super("test", cacheId, pageMem, null, new AtomicLong(), metaPageId, reuseList, - new IOVersions<>(new LongInnerIO()), new IOVersions<>(new LongLeafIO())); + super( + "test", + cacheId, + null, + pageMem, + null, + new AtomicLong(), + metaPageId, + reuseList, + new IOVersions<>(new LongInnerIO()), + new IOVersions<>(new LongLeafIO()), + null, + null + ); PageIO.registerTest(latestInnerIO(), latestLeafIO()); diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java new file mode 100644 index 0000000000000..1dc7d474270ed --- /dev/null +++ 
b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/FileStoreHeapUtilizationJolBenchmark.java @@ -0,0 +1,88 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jol; + +import java.io.File; +import java.nio.ByteBuffer; +import java.nio.file.Path; +import java.util.LinkedList; +import java.util.List; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.pagemem.store.PageStore; +import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreFactory; +import org.apache.ignite.internal.processors.cache.persistence.file.FileVersionCheckingFactory; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.openjdk.jol.info.GraphLayout; + +import static org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.PART_FILE_TEMPLATE; + +/** + * + */ +public class FileStoreHeapUtilizationJolBenchmark { + /** */ + private void benchmark() throws 
IgniteCheckedException { + FilePageStoreFactory factory = new FileVersionCheckingFactory( + new AsyncFileIOFactory(), + new AsyncFileIOFactory(), + new DataStorageConfiguration() + .setPageSize(4096) + ); + + List stores = new LinkedList<>(); + + File workDir = U.resolveWorkDirectory(U.defaultWorkDirectory(), "db", false); + + for (int i = 0; i < 10000; i++) { + final int p = i; + + PageStore ps = factory.createPageStore( + PageMemory.FLAG_DATA, + () -> getPartitionFilePath(workDir, p), + d -> { } + ); + + ps.ensure(); + + ps.write(0, ByteBuffer.allocate(256), 1, false); + + stores.add(ps); + } + + System.gc(); + + GraphLayout layout = GraphLayout.parseInstance(stores); + + System.out.println("heap usage: " + layout.totalSize()); + + U.delete(workDir); + } + + /** */ + private Path getPartitionFilePath(File cacheWorkDir, int partId) { + return new File(cacheWorkDir, String.format(PART_FILE_TEMPLATE, partId)).toPath(); + } + + /** */ + public static void main(String[] args) throws Exception { + new FileStoreHeapUtilizationJolBenchmark().benchmark(); + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/GridAffinityAssignmentJolBenchmark.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/GridAffinityAssignmentJolBenchmark.java new file mode 100644 index 0000000000000..f154341f07c33 --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/jol/GridAffinityAssignmentJolBenchmark.java @@ -0,0 +1,342 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.jol; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.concurrent.ConcurrentSkipListMap; +import org.apache.ignite.cache.CacheMetrics; +import org.apache.ignite.cache.affinity.AffinityFunctionContext; +import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; +import org.apache.ignite.cluster.ClusterMetrics; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.events.DiscoveryEvent; +import org.apache.ignite.internal.processors.affinity.AffinityAssignment; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentV2; +import org.apache.ignite.internal.processors.affinity.GridAffinityFunctionContextImpl; +import org.apache.ignite.internal.processors.affinity.HistoryAffinityAssignment; +import org.apache.ignite.internal.processors.affinity.HistoryAffinityAssignmentImpl; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.spi.discovery.DiscoveryMetricsProvider; +import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; +import org.openjdk.jol.info.GraphLayout; + +/** + * + */ +public class GridAffinityAssignmentJolBenchmark { + /** */ + private static DiscoveryMetricsProvider metrics = new DiscoveryMetricsProvider() { + @Override public ClusterMetrics 
metrics() { + return null; + } + + @Override public Map cacheMetrics() { + return null; + } + }; + + /** */ + private static IgniteProductVersion ver = new IgniteProductVersion(); + + /** */ + private static Field field; + + /** */ + public static void main(String[] args) throws Exception { + RendezvousAffinityFunction aff = new RendezvousAffinityFunction(true, 65000); + + int[] parts = new int[] {1024, 8192, 32768, 65000}; + + int[] nodes = new int[] {1, 16, 160, 600}; + + // We need to implement compressed bitsets https://issues.apache.org/jira/browse/IGNITE-4554. + // On 65k partitions and nodes > 700 HashSet take advantage over BitSet. + // After implementation need to check consumption on big clusters. + for (int part : parts) + for (int node : nodes) { + measure(aff, part, node, 0); + + measure(aff, part, node, 3); + + measure(aff, part, node, node); + } + + // Measure history assignment for normal and huge partition count. + // Nodes count doesn't affect heap occupation. + // Best result is achieved when running one measure at a time with large enough size of new region + // (to avoid object relocation). + measureHistory(1024, 32, 0); + measureHistory(1024, 32, 1); + measureHistory(1024, 32, 2); + measureHistory(1024, 32, Integer.MAX_VALUE); + measureHistory(32768, 32, 0); + measureHistory(32768, 32, 1); + measureHistory(32768, 32, 2); + measureHistory(32768, 32, Integer.MAX_VALUE); + } + + /** + * @param disabled Disabled. + */ + private static void setOptimization(boolean disabled) throws NoSuchFieldException, IllegalAccessException { + if (field == null) { + field = AffinityAssignment.class.getDeclaredField("IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION"); + + Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + + field.setAccessible(true); + } + + field.set(null, disabled); + } + + /** + * @param aff Aff. + * @param parts Parts. 
+ * @param nodeCnt Node count. + * @param backups Backups. + */ + private static void measure( + RendezvousAffinityFunction aff, + int parts, + int nodeCnt, + int backups + ) throws Exception { + List nodes = new ArrayList<>(); + + for (int i = 0; i < nodeCnt; i++) { + ClusterNode node = node(i); + nodes.add(node); + } + + AffinityFunctionContext ctx = new GridAffinityFunctionContextImpl( + nodes, + new ArrayList<>(), + new DiscoveryEvent(), + new AffinityTopologyVersion(), + backups + ); + + List> assignment = aff.assignPartitions(ctx); + + setOptimization(false); + + GridAffinityAssignmentV2 ga = new GridAffinityAssignmentV2( + new AffinityTopologyVersion(1, 0), + assignment, + new ArrayList<>() + ); + + System.gc(); + + long totalSize = GraphLayout.parseInstance(ga).totalSize(); + + System.out.println("Optimized, parts " + parts + +" nodeCount " + nodeCnt + +" backups " + backups + + " " + totalSize); + + setOptimization(true); + + GridAffinityAssignmentV2 ga2 = new GridAffinityAssignmentV2( + new AffinityTopologyVersion(1, 0), + assignment, + new ArrayList<>() + ); + + System.gc(); + + long totalSize2 = GraphLayout.parseInstance(ga2).totalSize(); + + System.out.println("Deoptimized, parts " + parts + +" nodeCount " + nodeCnt + +" backups " + backups + + " " + totalSize2); + + if (totalSize > totalSize2) + throw new Exception("Optimized AffinityAssignment size " + totalSize + " is more than deoptimized " + totalSize2); + } + + /** + * @param parts Parts. + * @param nodes Nodes. 
+ */ + private static void measureHistory(int parts, int nodes, int backups) throws Exception { + System.gc(); + + long deopt = measureHistory0(parts, nodes, true, backups); + + System.gc(); + + long opt = measureHistory0(parts, nodes, false, backups); + + if (opt > deopt) + throw new Exception("Optimized HistoryAffinityAssignment size " + opt + " is more than deoptimized " + deopt); + + float rate = deopt / (float)opt; + + System.out.println("Optimization: optimized=" + opt + ", deoptimized=" + deopt + " rate: " + ((int)(rate * 1000)) / 1000. ); + } + + /** + * @param parts Parts. + * @param nodeCnt Node count. + * @param disableOptimization Disable optimization. + */ + private static long measureHistory0(int parts, int nodeCnt, boolean disableOptimization, int backups) throws Exception { + System.gc(); + + setOptimization(disableOptimization); + + RendezvousAffinityFunction aff = new RendezvousAffinityFunction(true, parts); + + List nodes = new ArrayList<>(nodeCnt); + + nodes.add(node(0)); + + Map affCache = new ConcurrentSkipListMap<>(); + + List> prevAssignment = new ArrayList<>(); + + prevAssignment = aff.assignPartitions(context(new ArrayList<>(nodes), prevAssignment, 1)); + + for (int i = 1; i < nodeCnt; i++) { + ClusterNode newNode = node(i); + + nodes.add(newNode); + + List> idealAssignment = aff.assignPartitions(context(new ArrayList<>(nodes), prevAssignment, backups)); + + List> lateAssignmemnt = new ArrayList<>(parts); + + for (int j = 0; j < idealAssignment.size(); j++) { + List ideal0 = idealAssignment.get(j); + List prev = prevAssignment.get(j); + + ClusterNode curPrimary = prev.get(0); + + if (!curPrimary.equals(ideal0.get(0))) { + List cpy = new ArrayList<>(ideal0); + + cpy.remove(curPrimary); + cpy.add(0, curPrimary); + + lateAssignmemnt.add(cpy); + } + else + lateAssignmemnt.add(ideal0); + } + + AffinityTopologyVersion topVer = new AffinityTopologyVersion(i + 1, 0); + GridAffinityAssignmentV2 a = new GridAffinityAssignmentV2(topVer, 
lateAssignmemnt, idealAssignment); + HistoryAffinityAssignment h = new HistoryAffinityAssignmentImpl(a, backups); + + if (!lateAssignmemnt.equals(h.assignment())) + throw new RuntimeException(); + + if (!idealAssignment.equals(h.idealAssignment())) + throw new RuntimeException(); + + affCache.put(topVer, h); + + AffinityTopologyVersion topVer0 = new AffinityTopologyVersion(i + 1, 1); + + List> assignment = new ArrayList<>(parts); + + for (int j = 0; j < idealAssignment.size(); j++) { + List clusterNodes = idealAssignment.get(j); + + assignment.add(clusterNodes); + } + + GridAffinityAssignmentV2 a0 = new GridAffinityAssignmentV2(topVer0, assignment, idealAssignment); + HistoryAffinityAssignment h0 = new HistoryAffinityAssignmentImpl(a0, backups); + + if (!assignment.equals(h0.assignment())) + throw new RuntimeException(); + + if (!idealAssignment.equals(h0.idealAssignment())) + throw new RuntimeException(); + + affCache.put(topVer0, h0); + + prevAssignment = idealAssignment; + } + + System.gc(); + + GraphLayout l = GraphLayout.parseInstance(affCache); + + // Exclude nodes from estimation. + GraphLayout l2 = GraphLayout.parseInstance(nodes.toArray(new Object[nodes.size()])); + + GraphLayout l3 = l.subtract(l2); + + System.out.println("Heap usage [optimized=" + !disableOptimization + ", parts=" + parts + + ", nodeCnt=" + nodeCnt + + ", backups=" + backups + + ", " + l3.toFootprint() + ']'); + + return l3.totalSize(); + } + + /** + * @param nodes Nodes. + * @param prevAssignment Prev assignment. + * @param backups Backups. + */ + private static AffinityFunctionContext context( + List nodes, + List> prevAssignment, + int backups) { + return new GridAffinityFunctionContextImpl( + nodes, + prevAssignment, + new DiscoveryEvent(), + new AffinityTopologyVersion(), + backups + ); + } + + /** + * @return New test node. 
+ */ + private static ClusterNode node(int idx) { + TcpDiscoveryNode node = new TcpDiscoveryNode( + UUID.randomUUID(), + Collections.singletonList("127.0.0.1"), + Collections.singletonList("127.0.0.1"), + 0, + metrics, + ver, + "Node_" + idx + ); + node.setAttributes(Collections.emptyMap()); + + return node; + } +} diff --git a/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/model/Node.java b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/model/Node.java new file mode 100644 index 0000000000000..fd23edee23e0e --- /dev/null +++ b/modules/benchmarks/src/main/java/org/apache/ignite/internal/benchmarks/model/Node.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.benchmarks.model; + +import java.util.UUID; + +/** + * + */ +public class Node { + /** */ + private UUID uuid; + + /** + * + * @param uuid Uuid. + */ + public Node(UUID uuid) { + this.uuid = uuid; + } + + /** + * + * @return UUID. 
+ */ + public UUID getUuid() { + return uuid; + } +} diff --git a/modules/camel/pom.xml b/modules/camel/pom.xml index a5017e18e263b..6cd725f5ba913 100644 --- a/modules/camel/pom.xml +++ b/modules/camel/pom.xml @@ -31,11 +31,11 @@ ignite-camel - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org - 18.0 + 25.1-jre 2.5.0 diff --git a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java index fa7f5427d8629..c45272ed00c57 100644 --- a/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java +++ b/modules/camel/src/test/java/org/apache/ignite/stream/camel/IgniteCamelStreamerTestSuite.java @@ -21,7 +21,7 @@ import junit.framework.TestSuite; /** - * Camel streamer tests. + * Camel streamer tests. Included into 'Streamers' run configuration. */ public class IgniteCamelStreamerTestSuite extends TestSuite { /** diff --git a/modules/cassandra/pom.xml b/modules/cassandra/pom.xml index e01a95516ce71..e9ca8666a738a 100644 --- a/modules/cassandra/pom.xml +++ b/modules/cassandra/pom.xml @@ -32,7 +32,7 @@ ignite-cassandra pom - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/cassandra/serializers/pom.xml b/modules/cassandra/serializers/pom.xml index 58c08a9369243..e1caabc445a80 100644 --- a/modules/cassandra/serializers/pom.xml +++ b/modules/cassandra/serializers/pom.xml @@ -26,12 +26,12 @@ org.apache.ignite ignite-cassandra - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT .. ignite-cassandra-serializers - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/cassandra/store/pom.xml b/modules/cassandra/store/pom.xml index b62050b1c1218..8922a53472515 100644 --- a/modules/cassandra/store/pom.xml +++ b/modules/cassandra/store/pom.xml @@ -26,20 +26,20 @@ org.apache.ignite ignite-cassandra - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT .. 
ignite-cassandra-store - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org - 1.8.3 + 1.9.2 3.0.0 3.3 - 4.0.33.Final - 19.0 + 4.1.27.Final + 25.1-jre 3.0.2 @@ -108,6 +108,12 @@ ${netty.version} + + io.netty + netty-resolver + ${netty.version} + + com.google.guava guava @@ -120,6 +126,12 @@ ${metrics-core.version} + + org.slf4j + slf4j-api + ${slf4j.version} + + org.apache.cassandra cassandra-all @@ -130,6 +142,10 @@ log4j-over-slf4j org.slf4j + + commons-codec + commons-codec + diff --git a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java index 4fb0cb27d7c8f..4d59e54716115 100644 --- a/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java +++ b/modules/cassandra/store/src/main/java/org/apache/ignite/cache/store/cassandra/session/CassandraSessionImpl.java @@ -648,17 +648,25 @@ private void createKeyspace(KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); - log.info("-----------------------------------------------------------------------\n\n" + - settings.getKeyspaceDDLStatement() + "\n"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra keyspace '" + settings.getKeyspace() + "'"); + log.info("-----------------------------------------------------------------------\n\n" + + settings.getKeyspaceDDLStatement() + "\n"); + log.info("-----------------------------------------------------------------------"); + } + 
session().execute(settings.getKeyspaceDDLStatement()); - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra keyspace '" + settings.getKeyspace() + "' already exist"); + return; } catch (Throwable e) { @@ -689,17 +697,25 @@ private void createTable(String table, KeyValuePersistenceSettings settings) { while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------\n\n" + + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating Cassandra table '" + tableFullName + "'"); + log.info("-----------------------------------------------------------------------\n\n" + settings.getTableDDLStatement(table) + "\n"); - log.info("-----------------------------------------------------------------------"); + log.info("-----------------------------------------------------------------------"); + } + session().execute(settings.getTableDDLStatement(table)); - log.info("Cassandra table '" + tableFullName + "' was successfully created"); + + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' was successfully created"); + return; } catch (AlreadyExistsException ignored) { - log.info("Cassandra table '" + tableFullName + "' already exist"); + if (log.isInfoEnabled()) + log.info("Cassandra table '" + tableFullName + "' already exist"); + return; } catch (Throwable e) { @@ -741,14 +757,19 @@ private void createTableIndexes(String 
table, KeyValuePersistenceSettings settin while (attempt < CQL_EXECUTION_ATTEMPTS_COUNT) { try { - log.info("-----------------------------------------------------------------------"); - log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info("-----------------------------------------------------------------------"); + log.info("Creating indexes for Cassandra table '" + tableFullName + "'"); + log.info("-----------------------------------------------------------------------"); + } for (String statement : indexDDLStatements) { try { - log.info(statement); - log.info("-----------------------------------------------------------------------"); + if (log.isInfoEnabled()) { + log.info(statement); + log.info("-----------------------------------------------------------------------"); + } + session().execute(statement); } catch (AlreadyExistsException ignored) { @@ -759,7 +780,8 @@ private void createTableIndexes(String table, KeyValuePersistenceSettings settin } } - log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); + if (log.isInfoEnabled()) + log.info("Indexes for Cassandra table '" + tableFullName + "' were successfully created"); return; } diff --git a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java index e587bd7862282..be2211f801546 100644 --- a/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java +++ b/modules/cassandra/store/src/test/java/org/apache/ignite/tests/utils/TestTransaction.java @@ -146,6 +146,11 @@ public class TestTransaction implements Transaction { // No-op. 
} + /** {@inheritDoc} */ + @Nullable @Override public String label() { + return null; + } + /** {@inheritDoc} */ @Override public void resume() throws IgniteException { // No-op. diff --git a/modules/clients/pom.xml b/modules/clients/pom.xml index e75600b925e79..5f02027aad843 100644 --- a/modules/clients/pom.xml +++ b/modules/clients/pom.xml @@ -31,7 +31,7 @@ ignite-clients - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java index 102db57f0ddf2..0c272b9866e14 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/TaskEventSubjectIdSelfTest.java @@ -310,6 +310,9 @@ public void testClosure() throws Exception { } /** + * Events for class tasks that was started from external clients should contain + * client subject id instead of the node where it was started. This test checks it. + * * @throws Exception If failed. */ public void testClient() throws Exception { diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/ClientSslParametersTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/ClientSslParametersTest.java new file mode 100644 index 0000000000000..74f50ba70a77c --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/ClientSslParametersTest.java @@ -0,0 +1,323 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client; + +import java.util.Collections; +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.ssl.SslContextFactory; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; +import org.jetbrains.annotations.NotNull; + +/** + * Tests cases when node connects to cluster with different set of cipher suites. + */ +public class ClientSslParametersTest extends GridCommonAbstractTest { + /** */ + public static final String TEST_CACHE_NAME = "TEST"; + + /** */ + private volatile String[] cipherSuites; + + /** */ + private volatile String[] protocols; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(gridName); + + cfg.setSslContextFactory(createSslFactory()); + + cfg.setConnectorConfiguration(new ConnectorConfiguration() + .setSslEnabled(true) + .setSslClientAuth(true)); + + cfg.setCacheConfiguration(new CacheConfiguration(TEST_CACHE_NAME)); + + return cfg; + } + + /** + * @return Client configuration. 
+ */ + protected GridClientConfiguration getClientConfiguration() { + GridClientConfiguration cfg = new GridClientConfiguration(); + + cfg.setServers(Collections.singleton("127.0.0.1:11211")); + + cfg.setSslContextFactory(createOldSslFactory()); + + return cfg; + } + + /** + * @return SSL factory. + */ + @NotNull private SslContextFactory createSslFactory() { + SslContextFactory factory = (SslContextFactory)GridTestUtils.sslFactory(); + + factory.setCipherSuites(cipherSuites); + + factory.setProtocols(protocols); + + return factory; + } + + /** + * @return SSL Factory. + */ + @NotNull private GridSslBasicContextFactory createOldSslFactory() { + GridSslBasicContextFactory factory = (GridSslBasicContextFactory)GridTestUtils.sslContextFactory(); + + factory.setCipherSuites(cipherSuites); + + factory.setProtocols(protocols); + + return factory; + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + protocols = null; + + cipherSuites = null; + } + + /** + * @throws Exception If failed. + */ + public void testSameCipherSuite() throws Exception { + cipherSuites = new String[] { + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }; + + startGrid(); + + checkSuccessfulClientStart( + new String[] { + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }, + null + ); + } + + /** + * @throws Exception If failed. + */ + public void testOneCommonCipherSuite() throws Exception { + cipherSuites = new String[] { + "TLS_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }; + + startGrid(); + + checkSuccessfulClientStart( + new String[] { + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }, + null + ); + } + + /** + * @throws Exception If failed. 
+ */ + public void testNoCommonCipherSuite() throws Exception { + cipherSuites = new String[] { + "TLS_RSA_WITH_AES_128_GCM_SHA256" + }; + + startGrid(); + + checkClientStartFailure( + new String[] { + "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }, + null + ); + } + + /** + * @throws Exception If failed. + */ + public void testNonExistentCipherSuite() throws Exception { + fail("https://issues.apache.org/jira/browse/IGNITE-10245"); + + cipherSuites = new String[] { + "TLS_RSA_WITH_AES_128_GCM_SHA256" + }; + + startGrid(); + + checkClientStartFailure( + new String[] { + "TLC_FAKE_CIPHER", + "TLS_DHE_RSA_WITH_AES_128_GCM_SHA256" + }, + null, + "Unsupported ciphersuite" + ); + } + + /** + * @throws Exception If failed. + */ + public void testNoCommonProtocols() throws Exception { + protocols = new String[] { + "TLSv1.1", + "SSLv3" + }; + + startGrid(); + + checkClientStartFailure( + null, + new String[] { + "TLSv1", + "TLSv1.2" + } + ); + } + + /** + * @throws Exception If failed. + */ + public void testNonExistentProtocol() throws Exception { + fail("https://issues.apache.org/jira/browse/IGNITE-10245"); + + protocols = new String[] { + "SSLv3" + }; + + startGrid(); + + checkClientStartFailure( + null, + new String[] { + "SSLv3", + "SSLvDoesNotExist" + }, + "SSLvDoesNotExist" + ); + } + + /** + * @throws Exception If failed. + */ + public void testSameProtocols() throws Exception { + protocols = new String[] { + "TLSv1.1", + "TLSv1.2" + }; + + startGrid(); + + checkSuccessfulClientStart( + null, + new String[] { + "TLSv1.1", + "TLSv1.2" + } + ); + } + + /** + * @throws Exception If failed. 
+ */ + public void testOneCommonProtocol() throws Exception { + protocols = new String[] { + "TLSv1", + "TLSv1.1", + "TLSv1.2" + }; + + startGrid(); + + checkSuccessfulClientStart( + null, + new String[] { + "TLSv1.1", + "SSLv3" + } + ); + } + + /** + * @param cipherSuites list of cipher suites + * @param protocols list of protocols + * @throws Exception If failed. + */ + private void checkSuccessfulClientStart(String[] cipherSuites, String[] protocols) throws Exception { + this.cipherSuites = F.isEmpty(cipherSuites) ? null : cipherSuites; + this.protocols = F.isEmpty(protocols) ? null : protocols; + + try (GridClient client = GridClientFactory.start(getClientConfiguration())) { + List top = client.compute().refreshTopology(false, false); + + assertEquals(1, top.size()); + } + } + + /** + * @param cipherSuites list of cipher suites + * @param protocols list of protocols + */ + private void checkClientStartFailure(String[] cipherSuites, String[] protocols) { + checkClientStartFailure(cipherSuites, protocols, "Latest topology update failed."); + } + + /** + * @param cipherSuites list of cipher suites + * @param protocols list of protocols + * @param msg exception message + */ + private void checkClientStartFailure(String[] cipherSuites, String[] protocols, String msg) { + this.cipherSuites = F.isEmpty(cipherSuites) ? null : cipherSuites; + this.protocols = F.isEmpty(protocols) ? 
null : protocols; + + GridTestUtils.assertThrows( + null, + new Callable() { + @Override public Object call() throws Exception { + GridClient client = GridClientFactory.start(getClientConfiguration()); + + client.compute().refreshTopology(false, false); + + return null; + } + }, + GridClientException.class, + msg + ); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java index 79fcf38c67e1a..d6ef9f9e208fa 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/client/suite/IgniteClientTestSuite.java @@ -18,8 +18,11 @@ package org.apache.ignite.internal.client.suite; import junit.framework.TestSuite; +import org.apache.ignite.internal.IgniteClientFailuresTest; +import org.apache.ignite.internal.TaskEventSubjectIdSelfTest; import org.apache.ignite.internal.client.ClientDefaultCacheSelfTest; import org.apache.ignite.internal.client.ClientReconnectionSelfTest; +import org.apache.ignite.internal.client.ClientSslParametersTest; import org.apache.ignite.internal.client.ClientTcpMultiThreadedSelfTest; import org.apache.ignite.internal.client.ClientTcpSslAuthenticationSelfTest; import org.apache.ignite.internal.client.ClientTcpSslMultiThreadedSelfTest; @@ -49,7 +52,9 @@ import org.apache.ignite.internal.client.util.ClientConsistentHashSelfTest; import org.apache.ignite.internal.client.util.ClientJavaHasherSelfTest; import org.apache.ignite.internal.processors.rest.ClientMemcachedProtocolSelfTest; -import org.apache.ignite.internal.processors.rest.JettyRestProcessorAuthenticationSelfTest; +import org.apache.ignite.internal.processors.rest.JettyRestProcessorAuthenticationWithCredsSelfTest; +import org.apache.ignite.internal.processors.rest.JettyRestProcessorAuthenticationWithTokenSelfTest; +import 
org.apache.ignite.internal.processors.rest.JettyRestProcessorGetAllAsArrayTest; import org.apache.ignite.internal.processors.rest.JettyRestProcessorSignedSelfTest; import org.apache.ignite.internal.processors.rest.JettyRestProcessorUnsignedSelfTest; import org.apache.ignite.internal.processors.rest.RestBinaryProtocolSelfTest; @@ -57,8 +62,10 @@ import org.apache.ignite.internal.processors.rest.RestProcessorMultiStartSelfTest; import org.apache.ignite.internal.processors.rest.RestProcessorStartSelfTest; import org.apache.ignite.internal.processors.rest.TaskCommandHandlerSelfTest; +import org.apache.ignite.internal.processors.rest.TcpRestUnmarshalVulnerabilityTest; import org.apache.ignite.internal.processors.rest.protocols.tcp.TcpRestParserSelfTest; import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolConnectSelfTest; +import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolGetAllAsArrayTest; import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolServerSelfTest; import org.apache.ignite.internal.processors.rest.protocols.tcp.redis.RedisProtocolStringSelfTest; import org.apache.ignite.testframework.IgniteTestSuite; @@ -83,17 +90,21 @@ public static TestSuite suite() { // Test custom binary protocol with test client. suite.addTestSuite(RestBinaryProtocolSelfTest.class); + suite.addTestSuite(TcpRestUnmarshalVulnerabilityTest.class); // Test jetty rest processor suite.addTestSuite(JettyRestProcessorSignedSelfTest.class); suite.addTestSuite(JettyRestProcessorUnsignedSelfTest.class); - suite.addTestSuite(JettyRestProcessorAuthenticationSelfTest.class); + suite.addTestSuite(JettyRestProcessorAuthenticationWithCredsSelfTest.class); + suite.addTestSuite(JettyRestProcessorAuthenticationWithTokenSelfTest.class); + suite.addTestSuite(JettyRestProcessorGetAllAsArrayTest.class); // Test TCP rest processor with original memcache client. 
suite.addTestSuite(ClientMemcachedProtocolSelfTest.class); // Test TCP rest processor with original REDIS client. suite.addTestSuite(RedisProtocolStringSelfTest.class); + suite.addTestSuite(RedisProtocolGetAllAsArrayTest.class); suite.addTestSuite(RedisProtocolConnectSelfTest.class); suite.addTestSuite(RedisProtocolServerSelfTest.class); @@ -153,6 +164,11 @@ public static TestSuite suite() { suite.addTestSuite(ClientTcpTaskExecutionAfterTopologyRestartSelfTest.class); + // SSL params. + suite.addTestSuite(ClientSslParametersTest.class); + + suite.addTestSuite(IgniteClientFailuresTest.class); + return suite; } } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcAbstractDmlStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcAbstractDmlStatementSelfTest.java index f4c0ca3464ed6..766d63729ff11 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcAbstractDmlStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcAbstractDmlStatementSelfTest.java @@ -136,10 +136,12 @@ protected String getCfgUrl() { /** {@inheritDoc} */ @Override protected void afterTest() throws Exception { - ((IgniteEx)ignite(0)).context().cache().dynamicDestroyCache(DEFAULT_CACHE_NAME, true, true, false); + ((IgniteEx)ignite(0)).context().cache().dynamicDestroyCache(DEFAULT_CACHE_NAME, true, true, false, null); - conn.close(); - assertTrue(conn.isClosed()); + if (conn != null) { + conn.close(); + assertTrue(conn.isClosed()); + } cleanUpWorkingDir(); } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java index 9485d0d54212c..652d635cbe4a1 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java +++ 
b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcDynamicIndexAbstractSelfTest.java @@ -31,6 +31,7 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.testframework.GridTestUtils; /** * Test that checks indexes handling with JDBC. @@ -168,9 +169,9 @@ public void testCreateIndex() throws SQLException { public void testCreateIndexWithDuplicateName() throws SQLException { jdbcRun(CREATE_INDEX); - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(CREATE_INDEX); } }); @@ -219,9 +220,9 @@ public void testDropIndex() throws SQLException { * Test that dropping a non-existent index yields an error. */ public void testDropMissingIndex() { - assertSqlException(new RunnableX() { + assertSqlException(new GridTestUtils.RunnableX() { /** {@inheritDoc} */ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { jdbcRun(DROP_INDEX); } }); @@ -310,11 +311,11 @@ private IgniteCache cache() { * * @param r Runnable. */ - private static void assertSqlException(RunnableX r) { + private static void assertSqlException(GridTestUtils.RunnableX r) { // We expect IgniteSQLException with given code inside CacheException inside JDBC SQLException. try { - r.run(); + r.runx(); } catch (SQLException e) { return; @@ -325,16 +326,4 @@ private static void assertSqlException(RunnableX r) { fail(SQLException.class.getSimpleName() + " is not thrown."); } - - /** - * Runnable which can throw checked exceptions. - */ - private interface RunnableX { - /** - * Do run. - * - * @throws Exception If failed. 
- */ - public void run() throws Exception; - } } diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java index 42134e74eed63..a58ee09b9b217 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcResultSetSelfTest.java @@ -156,7 +156,7 @@ private TestObject createObjectWithData(int id) throws MalformedURLException { o.floatVal = 1.0f; o.doubleVal = 1.0d; o.bigVal = new BigDecimal(1); - o.strVal = "str"; + o.strVal = "1"; o.arrVal = new byte[] {1}; o.dateVal = new Date(1, 1, 1); o.timeVal = new Time(1, 1, 1); @@ -178,12 +178,72 @@ public void testBoolean() throws Exception { if (cnt == 0) { assert rs.getBoolean("boolVal"); assert rs.getBoolean(2); + assert rs.getByte(2) == 1; + assert rs.getInt(2) == 1; + assert rs.getShort(2) == 1; + assert rs.getLong(2) == 1; + assert rs.getDouble(2) == 1.0; + assert rs.getFloat(2) == 1.0f; + assert rs.getBigDecimal(2).equals(new BigDecimal(1)); + assert rs.getString(2).equals("true"); + + assert rs.getObject(2, Boolean.class); + assert rs.getObject(2, Byte.class) == 1; + assert rs.getObject(2, Short.class) == 1; + assert rs.getObject(2, Integer.class) == 1; + assert rs.getObject(2, Long.class) == 1; + assert rs.getObject(2, Float.class) == 1.f; + assert rs.getObject(2, Double.class) == 1; + assert rs.getObject(2, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(2, String.class).equals("true"); } cnt++; } assert cnt == 1; + + ResultSet rs0 = stmt.executeQuery("select 1"); + + assert rs0.next(); + assert rs0.getBoolean(1); + + rs0 = stmt.executeQuery("select 0"); + + assert rs0.next(); + assert !rs0.getBoolean(1); + + rs0 = stmt.executeQuery("select '1'"); + + assert rs0.next(); + assert rs0.getBoolean(1); + + rs0 = stmt.executeQuery("select '0'"); + + assert 
rs0.next(); + assert !rs0.getBoolean(1); + + GridTestUtils.assertThrowsAnyCause(log, new Callable() { + @Override public Void call() throws Exception { + ResultSet rs0 = stmt.executeQuery("select ''"); + + assert rs0.next(); + assert rs0.getBoolean(1); + + return null; + } + }, SQLException.class, "Cannot convert to boolean: "); + + GridTestUtils.assertThrowsAnyCause(log, new Callable() { + @Override public Void call() throws Exception { + ResultSet rs0 = stmt.executeQuery("select 'qwe'"); + + assert rs0.next(); + assert rs0.getBoolean(1); + + return null; + } + }, SQLException.class, "Cannot convert to boolean: qwe"); } /** @@ -258,6 +318,26 @@ public void testByte() throws Exception { if (cnt == 0) { assert rs.getByte("byteVal") == 1; assert rs.getByte(3) == 1; + + assert rs.getBoolean(3); + assert rs.getByte(3) == 1; + assert rs.getInt(3) == 1; + assert rs.getShort(3) == 1; + assert rs.getLong(3) == 1; + assert rs.getDouble(3) == 1.0; + assert rs.getFloat(3) == 1.0f; + assert rs.getBigDecimal(3).equals(new BigDecimal(1)); + assert rs.getString(3).equals("1"); + + assert rs.getObject(3, Boolean.class); + assert rs.getObject(3, Byte.class) == 1; + assert rs.getObject(3, Short.class) == 1; + assert rs.getObject(3, Integer.class) == 1; + assert rs.getObject(3, Long.class) == 1; + assert rs.getObject(3, Float.class) == 1.f; + assert rs.getObject(3, Double.class) == 1; + assert rs.getObject(3, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(3, String.class).equals("1"); } cnt++; @@ -278,6 +358,26 @@ public void testShort() throws Exception { if (cnt == 0) { assert rs.getShort("shortVal") == 1; assert rs.getShort(4) == 1; + + assert rs.getBoolean(4); + assert rs.getByte(4) == 1; + assert rs.getShort(4) == 1; + assert rs.getInt(4) == 1; + assert rs.getLong(4) == 1; + assert rs.getDouble(4) == 1.0; + assert rs.getFloat(4) == 1.0f; + assert rs.getBigDecimal(4).equals(new BigDecimal(1)); + assert rs.getString(4).equals("1"); + + assert rs.getObject(4, 
Boolean.class); + assert rs.getObject(4, Byte.class) == 1; + assert rs.getObject(4, Short.class) == 1; + assert rs.getObject(4, Integer.class) == 1; + assert rs.getObject(4, Long.class) == 1; + assert rs.getObject(4, Float.class) == 1.f; + assert rs.getObject(4, Double.class) == 1; + assert rs.getObject(4, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(4, String.class).equals("1"); } cnt++; @@ -298,6 +398,26 @@ public void testInteger() throws Exception { if (cnt == 0) { assert rs.getInt("intVal") == 1; assert rs.getInt(5) == 1; + + assert rs.getBoolean(5); + assert rs.getByte(5) == 1; + assert rs.getShort(5) == 1; + assert rs.getInt(5) == 1; + assert rs.getLong(5) == 1; + assert rs.getDouble(5) == 1.0; + assert rs.getFloat(5) == 1.0f; + assert rs.getBigDecimal(5).equals(new BigDecimal(1)); + assert rs.getString(5).equals("1"); + + assert rs.getObject(5, Boolean.class); + assert rs.getObject(5, Byte.class) == 1; + assert rs.getObject(5, Short.class) == 1; + assert rs.getObject(5, Integer.class) == 1; + assert rs.getObject(5, Long.class) == 1; + assert rs.getObject(5, Float.class) == 1.f; + assert rs.getObject(5, Double.class) == 1; + assert rs.getObject(5, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(5, String.class).equals("1"); } cnt++; @@ -318,6 +438,26 @@ public void testLong() throws Exception { if (cnt == 0) { assert rs.getLong("longVal") == 1; assert rs.getLong(6) == 1; + + assert rs.getBoolean(6); + assert rs.getByte(6) == 1; + assert rs.getShort(6) == 1; + assert rs.getInt(6) == 1; + assert rs.getLong(6) == 1; + assert rs.getDouble(6) == 1.0; + assert rs.getFloat(6) == 1.0f; + assert rs.getBigDecimal(6).equals(new BigDecimal(1)); + assert rs.getString(6).equals("1"); + + assert rs.getObject(6, Boolean.class); + assert rs.getObject(6, Byte.class) == 1; + assert rs.getObject(6, Short.class) == 1; + assert rs.getObject(6, Integer.class) == 1; + assert rs.getObject(6, Long.class) == 1; + assert rs.getObject(6, 
Float.class) == 1.f; + assert rs.getObject(6, Double.class) == 1; + assert rs.getObject(6, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(6, String.class).equals("1"); } cnt++; @@ -338,6 +478,26 @@ public void testFloat() throws Exception { if (cnt == 0) { assert rs.getFloat("floatVal") == 1.0; assert rs.getFloat(7) == 1.0; + + assert rs.getBoolean(7); + assert rs.getByte(7) == 1; + assert rs.getShort(7) == 1; + assert rs.getInt(7) == 1; + assert rs.getLong(7) == 1; + assert rs.getDouble(7) == 1.0; + assert rs.getFloat(7) == 1.0f; + assert rs.getBigDecimal(7).equals(new BigDecimal(1)); + assert rs.getString(7).equals("1.0"); + + assert rs.getObject(7, Boolean.class); + assert rs.getObject(7, Byte.class) == 1; + assert rs.getObject(7, Short.class) == 1; + assert rs.getObject(7, Integer.class) == 1; + assert rs.getObject(7, Long.class) == 1; + assert rs.getObject(7, Float.class) == 1.f; + assert rs.getObject(7, Double.class) == 1; + assert rs.getObject(7, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(7, String.class).equals("1.0"); } cnt++; @@ -358,6 +518,26 @@ public void testDouble() throws Exception { if (cnt == 0) { assert rs.getDouble("doubleVal") == 1.0; assert rs.getDouble(8) == 1.0; + + assert rs.getBoolean(8); + assert rs.getByte(8) == 1; + assert rs.getShort(8) == 1; + assert rs.getInt(8) == 1; + assert rs.getLong(8) == 1; + assert rs.getDouble(8) == 1.0; + assert rs.getFloat(8) == 1.0f; + assert rs.getBigDecimal(8).equals(new BigDecimal(1)); + assert rs.getString(8).equals("1.0"); + + assert rs.getObject(8, Boolean.class); + assert rs.getObject(8, Byte.class) == 1; + assert rs.getObject(8, Short.class) == 1; + assert rs.getObject(8, Integer.class) == 1; + assert rs.getObject(8, Long.class) == 1; + assert rs.getObject(8, Float.class) == 1.f; + assert rs.getObject(8, Double.class) == 1; + assert rs.getObject(8, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(8, String.class).equals("1.0"); } cnt++; 
@@ -378,6 +558,26 @@ public void testBigDecimal() throws Exception { if (cnt == 0) { assert rs.getBigDecimal("bigVal").intValue() == 1; assert rs.getBigDecimal(9).intValue() == 1; + + assert rs.getBoolean(9); + assert rs.getByte(9) == 1; + assert rs.getShort(9) == 1; + assert rs.getInt(9) == 1; + assert rs.getLong(9) == 1; + assert rs.getDouble(9) == 1.0; + assert rs.getFloat(9) == 1.0f; + assert rs.getBigDecimal(9).equals(new BigDecimal(1)); + assert rs.getString(9).equals("1"); + + assert rs.getObject(9, Boolean.class); + assert rs.getObject(9, Byte.class) == 1; + assert rs.getObject(9, Short.class) == 1; + assert rs.getObject(9, Integer.class) == 1; + assert rs.getObject(9, Long.class) == 1; + assert rs.getObject(9, Float.class) == 1.f; + assert rs.getObject(9, Double.class) == 1; + assert rs.getObject(9, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(9, String.class).equals("1"); } cnt++; @@ -386,6 +586,30 @@ public void testBigDecimal() throws Exception { assert cnt == 1; } + /** + * @throws Exception If failed. + */ + public void testBigDecimalScale() throws Exception { + assert "0.12".equals(convertStringToBigDecimalViaJdbc("0.1234", 2).toString()); + assert "1.001".equals(convertStringToBigDecimalViaJdbc("1.0005", 3).toString()); + assert "1E+3".equals(convertStringToBigDecimalViaJdbc("1205.5", -3).toString()); + assert "1.3E+4".equals(convertStringToBigDecimalViaJdbc("12505.5", -3).toString()); + } + + /** + * @param strDec String representation of a decimal value. + * @param scale Scale. + * @return BigDecimal object. + * @throws SQLException On error. + */ + private BigDecimal convertStringToBigDecimalViaJdbc(String strDec, int scale) throws SQLException { + try(ResultSet rs = stmt.executeQuery("select '" + strDec + "'")) { + assert rs.next(); + + return rs.getBigDecimal(1, scale); + } + } + /** * @throws Exception If failed. 
*/ @@ -396,8 +620,27 @@ public void testString() throws Exception { while (rs.next()) { if (cnt == 0) { - assert "str".equals(rs.getString("strVal")); - assert "str".equals(rs.getString(10)); + assert "1".equals(rs.getString("strVal")); + + assert rs.getBoolean(10); + assert rs.getByte(10) == 1; + assert rs.getShort(10) == 1; + assert rs.getInt(10) == 1; + assert rs.getLong(10) == 1; + assert rs.getDouble(10) == 1.0; + assert rs.getFloat(10) == 1.0f; + assert rs.getBigDecimal(10).equals(new BigDecimal("1")); + assert rs.getString(10).equals("1"); + + assert rs.getObject(10, Boolean.class); + assert rs.getObject(10, Byte.class) == 1; + assert rs.getObject(10, Short.class) == 1; + assert rs.getObject(10, Integer.class) == 1; + assert rs.getObject(10, Long.class) == 1; + assert rs.getObject(10, Float.class) == 1.f; + assert rs.getObject(10, Double.class) == 1; + assert rs.getObject(10, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(10, String.class).equals("1"); } cnt++; @@ -438,7 +681,14 @@ public void testDate() throws Exception { while (rs.next()) { if (cnt == 0) { assert rs.getDate("dateVal").equals(new Date(1, 1, 1)); + assert rs.getDate(12).equals(new Date(1, 1, 1)); + assert rs.getTime(12).equals(new Time(new Date(1, 1, 1).getTime())); + assert rs.getTimestamp(12).equals(new Timestamp(new Date(1, 1, 1).getTime())); + + assert rs.getObject(12, Date.class).equals(new Date(1, 1, 1)); + assert rs.getObject(12, Time.class).equals(new Time(new Date(1, 1, 1).getTime())); + assert rs.getObject(12, Timestamp.class).equals(new Timestamp(new Date(1, 1, 1).getTime())); } cnt++; @@ -459,7 +709,14 @@ public void testTime() throws Exception { while (rs.next()) { if (cnt == 0) { assert rs.getTime("timeVal").equals(new Time(1, 1, 1)); + + assert rs.getDate(13).equals(new Date(new Time(1, 1, 1).getTime())); assert rs.getTime(13).equals(new Time(1, 1, 1)); + assert rs.getTimestamp(13).equals(new Timestamp(new Time(1, 1, 1).getTime())); + + assert 
rs.getObject(13, Date.class).equals(new Date(new Time(1, 1, 1).getTime())); + assert rs.getObject(13, Time.class).equals(new Time(1, 1, 1)); + assert rs.getObject(13, Timestamp.class).equals(new Timestamp(new Time(1, 1, 1).getTime())); } cnt++; @@ -479,7 +736,14 @@ public void testTimestamp() throws Exception { while (rs.next()) { if (cnt == 0) { assert rs.getTimestamp("tsVal").getTime() == 1; - assert rs.getTimestamp(14).getTime() == 1; + + assert rs.getDate(14).equals(new Date(new Timestamp(1).getTime())); + assert rs.getTime(14).equals(new Time(new Timestamp(1).getTime())); + assert rs.getTimestamp(14).equals(new Timestamp(1)); + + assert rs.getObject(14, Date.class).equals(new Date(new Timestamp(1).getTime())); + assert rs.getObject(14, Time.class).equals(new Time(new Timestamp(1).getTime())); + assert rs.getObject(14, Timestamp.class).equals(new Timestamp(1)); } cnt++; diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java index 10adedcf3ec66..e302529404d70 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/jdbc2/JdbcStreamingSelfTest.java @@ -56,9 +56,6 @@ public class JdbcStreamingSelfTest extends GridCommonAbstractTest { private static final String STREAMING_URL = CFG_URL_PREFIX + "cache=person@modules/clients/src/test/config/jdbc-config.xml"; - /** */ - protected transient IgniteLogger log; - /** {@inheritDoc} */ @Override protected IgniteConfiguration getConfiguration(String gridName) throws Exception { return getConfiguration0(gridName); diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java index 712b71a0c4cc7..e5c658ccfae82 
100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/AbstractRestProcessorSelfTest.java @@ -29,7 +29,7 @@ /** * Abstract class for REST protocols tests. */ -abstract class AbstractRestProcessorSelfTest extends GridCommonAbstractTest { +public abstract class AbstractRestProcessorSelfTest extends GridCommonAbstractTest { /** IP finder. */ private static final TcpDiscoveryIpFinder IP_FINDER = new TcpDiscoveryVmIpFinder(true); @@ -43,12 +43,9 @@ abstract class AbstractRestProcessorSelfTest extends GridCommonAbstractTest { /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { - startGrids(gridCount()); - } + cleanPersistenceDir(); - /** {@inheritDoc} */ - @Override protected void afterTestsStopped() throws Exception { - stopAllGrids(); + startGrids(gridCount()); } /** {@inheritDoc} */ @@ -60,7 +57,14 @@ abstract class AbstractRestProcessorSelfTest extends GridCommonAbstractTest { @Override protected void afterTest() throws Exception { jcache().clear(); - assertTrue(jcache().localSize() == 0); + assertEquals(0, jcache().localSize()); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + stopAllGrids(); + + cleanPersistenceDir(); } /** {@inheritDoc} */ diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java index 5dc44c4145516..bcd66f0a4de8a 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAbstractSelfTest.java @@ -17,14 +17,10 @@ package org.apache.ignite.internal.processors.rest; +import 
com.fasterxml.jackson.databind.JsonNode; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; -import java.io.LineNumberReader; import java.io.Serializable; import java.io.UnsupportedEncodingException; -import java.net.URL; -import java.net.URLConnection; import java.net.URLEncoder; import java.nio.charset.StandardCharsets; import java.sql.Date; @@ -34,16 +30,11 @@ import java.util.Collection; import java.util.HashMap; import java.util.Iterator; -import java.util.LinkedHashMap; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; - import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cache.CacheWriteSynchronizationMode; @@ -61,7 +52,6 @@ import org.apache.ignite.internal.processors.cache.query.GridCacheSqlIndexMetadata; import org.apache.ignite.internal.processors.cache.query.GridCacheSqlMetadata; import org.apache.ignite.internal.processors.rest.handlers.GridRestCommandHandler; -import org.apache.ignite.internal.processors.rest.protocols.http.jetty.GridJettyObjectMapper; import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P1; @@ -148,7 +138,6 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.testframework.GridTestUtils; -import static org.apache.ignite.IgniteSystemProperties.IGNITE_JETTY_PORT; import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheMode.REPLICATED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_ASYNC; @@ -163,120 +152,22 @@ * Tests for Jetty REST protocol. 
*/ @SuppressWarnings("unchecked") -public abstract class JettyRestProcessorAbstractSelfTest extends AbstractRestProcessorSelfTest { - /** Grid count. */ - private static final int GRID_CNT = 3; - +public abstract class JettyRestProcessorAbstractSelfTest extends JettyRestProcessorCommonSelfTest { /** Used to sent request charset. */ private static final String CHARSET = StandardCharsets.UTF_8.name(); - /** JSON to java mapper. */ - private static final ObjectMapper JSON_MAPPER = new GridJettyObjectMapper(); - /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { - System.setProperty(IGNITE_JETTY_PORT, Integer.toString(restPort())); - super.beforeTestsStarted(); initCache(); } - /** {@inheritDoc} */ - @Override protected void afterTestsStopped() throws Exception { - super.afterTestsStopped(); - - System.clearProperty(IGNITE_JETTY_PORT); - } - /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { grid(0).cache(DEFAULT_CACHE_NAME).removeAll(); } - /** {@inheritDoc} */ - @Override protected int gridCount() { - return GRID_CNT; - } - - /** - * @return Port to use for rest. Needs to be changed over time because Jetty has some delay before port unbind. - */ - protected abstract int restPort(); - - /** - * @return Test URL - */ - protected String restUrl() { - return "http://" + LOC_HOST + ":" + restPort() + "/ignite?"; - } - - /** - * @return Security enabled flag. Should be the same with {@code ctx.security().enabled()}. - */ - protected boolean securityEnabled() { - return false; - } - - /** - * Execute REST command and return result. - * - * @param params Command parameters. - * @return Returned content. - * @throws Exception If failed. 
- */ - protected String content(Map params) throws Exception { - SB sb = new SB(restUrl()); - - for (Map.Entry e : params.entrySet()) - sb.a(e.getKey()).a('=').a(e.getValue()).a('&'); - - URL url = new URL(sb.toString()); - - URLConnection conn = url.openConnection(); - - String signature = signature(); - - if (signature != null) - conn.setRequestProperty("X-Signature", signature); - - InputStream in = conn.getInputStream(); - - StringBuilder buf = new StringBuilder(256); - - try (LineNumberReader rdr = new LineNumberReader(new InputStreamReader(in, "UTF-8"))) { - for (String line = rdr.readLine(); line != null; line = rdr.readLine()) - buf.append(line); - } - - return buf.toString(); - } - - /** - * @param cacheName Optional cache name. - * @param cmd REST command. - * @param params Command parameters. - * @return Returned content. - * @throws Exception If failed. - */ - protected String content(String cacheName, GridRestCommand cmd, String... params) throws Exception { - Map paramsMap = new LinkedHashMap<>(); - - if (cacheName != null) - paramsMap.put("cacheName", cacheName); - - paramsMap.put("cmd", cmd.key()); - - if (params != null) { - assertEquals(0, params.length % 2); - - for (int i = 0; i < params.length; i += 2) - paramsMap.put(params[i], params[i + 1]); - } - - return content(paramsMap); - } - /** * @param content Content to check. * @param err Error message. @@ -294,14 +185,19 @@ protected void assertResponseContainsError(String content, String err) throws IO /** * @param content Content to check. + * @return JSON node with actual response. 
*/ - private JsonNode jsonCacheOperationResponse(String content, boolean bulk) throws IOException { + protected JsonNode assertResponseSucceeded(String content, boolean bulk) throws IOException { assertNotNull(content); assertFalse(content.isEmpty()); JsonNode node = JSON_MAPPER.readTree(content); - assertEquals(bulk, node.get("affinityNodeId").isNull()); + JsonNode affNode = node.get("affinityNodeId"); + + if (affNode != null) + assertEquals(bulk, affNode.isNull()); + assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt()); assertTrue(node.get("error").isNull()); @@ -315,7 +211,7 @@ private JsonNode jsonCacheOperationResponse(String content, boolean bulk) throws * @param res Response. */ private void assertCacheOperation(String content, Object res) throws IOException { - JsonNode ret = jsonCacheOperationResponse(content, false); + JsonNode ret = assertResponseSucceeded(content, false); assertEquals(String.valueOf(res), ret.isObject() ? ret.toString() : ret.asText()); } @@ -325,7 +221,7 @@ private void assertCacheOperation(String content, Object res) throws IOException * @param res Response. */ private void assertCacheBulkOperation(String content, Object res) throws IOException { - JsonNode ret = jsonCacheOperationResponse(content, true); + JsonNode ret = assertResponseSucceeded(content, true); assertEquals(String.valueOf(res), ret.asText()); } @@ -334,7 +230,7 @@ private void assertCacheBulkOperation(String content, Object res) throws IOExcep * @param content Content to check. 
*/ private void assertCacheMetrics(String content) throws IOException { - JsonNode ret = jsonCacheOperationResponse(content, true); + JsonNode ret = assertResponseSucceeded(content, true); assertTrue(ret.isObject()); } @@ -349,7 +245,7 @@ protected JsonNode jsonResponse(String content) throws IOException { JsonNode node = JSON_MAPPER.readTree(content); - assertEquals(0, node.get("successStatus").asInt()); + assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt()); assertTrue(node.get("error").isNull()); assertNotSame(securityEnabled(), node.get("sessionToken").isNull()); @@ -367,7 +263,7 @@ protected JsonNode jsonTaskResult(String content) throws IOException { JsonNode node = JSON_MAPPER.readTree(content); - assertEquals(0, node.get("successStatus").asInt()); + assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt()); assertTrue(node.get("error").isNull()); assertFalse(node.get("response").isNull()); @@ -403,7 +299,7 @@ public void testGet() throws Exception { * @throws IOException If failed. 
*/ private void checkJson(String json, Person p) throws IOException { - JsonNode res = jsonCacheOperationResponse(json, false); + JsonNode res = assertResponseSucceeded(json, false); assertEquals(p.id.intValue(), res.get("id").asInt()); assertEquals(p.getOrganizationId().intValue(), res.get("orgId").asInt()); @@ -455,7 +351,7 @@ public void testGetBinaryObjects() throws Exception { info("Get command result: " + ret); - JsonNode res = jsonCacheOperationResponse(ret, false); + JsonNode res = assertResponseSucceeded(ret, false); assertEquals("Alex", res.get("NAME").asText()); assertEquals(300, res.get("SALARY").asInt()); @@ -476,8 +372,9 @@ public void testGetBinaryObjects() throws Exception { info("Get command result: " + ret); - JsonNode json = jsonCacheOperationResponse(ret, false); + JsonNode json = assertResponseSucceeded(ret, false); assertEquals(ref1.name, json.get("name").asText()); + assertEquals(ref1.ref.toString(), json.get("ref").toString()); ref2.ref(ref1); @@ -552,7 +449,7 @@ public void testSimpleObject() throws Exception { info("Get command result: " + ret); - JsonNode res = jsonCacheOperationResponse(ret, false); + JsonNode res = assertResponseSucceeded(ret, false); assertEquals(p.id, res.get("id").asInt()); assertEquals(p.name, res.get("name").asText()); @@ -637,7 +534,7 @@ public void testTuple() throws Exception { info("Get command result: " + ret); - JsonNode res = jsonCacheOperationResponse(ret, false); + JsonNode res = assertResponseSucceeded(ret, false); assertEquals(t.getKey(), res.get("key").asText()); assertEquals(t.getValue(), res.get("value").asText()); @@ -775,11 +672,11 @@ public void testGetAll() throws Exception { info("Get all command result: " + ret); - JsonNode res = jsonCacheOperationResponse(ret, true); + JsonNode res = assertResponseSucceeded(ret, true); assertTrue(res.isObject()); - assertTrue(entries.equals(JSON_MAPPER.treeToValue(res, Map.class))); + assertEquals(entries, JSON_MAPPER.treeToValue(res, Map.class)); } /** @@ 
-789,7 +686,7 @@ public void testIncorrectPut() throws Exception { String ret = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_PUT, "key", "key0"); assertResponseContainsError(ret, - "Failed to handle request: [req=CACHE_PUT, err=Failed to find mandatory parameter in request: val]"); + "Failed to find mandatory parameter in request: val"); } /** @@ -1100,7 +997,7 @@ public void testRemoveAll() throws Exception { assertNull(jcache().localPeek("rmvKey2")); assertNull(jcache().localPeek("rmvKey3")); assertNull(jcache().localPeek("rmvKey4")); - assertTrue(jcache().localSize() == 0); + assertEquals(0, jcache().localSize()); assertCacheBulkOperation(ret, true); } @@ -1353,20 +1250,20 @@ private void testMetadata(Collection metas, JsonNode arr) assertNotNull(keyClasses); assertFalse(keyClasses.isNull()); - assertTrue(meta.keyClasses().equals(JSON_MAPPER.treeToValue(keyClasses, Map.class))); + assertEquals(meta.keyClasses(), JSON_MAPPER.treeToValue(keyClasses, Map.class)); JsonNode valClasses = item.get("valClasses"); assertNotNull(valClasses); assertFalse(valClasses.isNull()); - assertTrue(meta.valClasses().equals(JSON_MAPPER.treeToValue(valClasses, Map.class))); + assertEquals(meta.valClasses(), JSON_MAPPER.treeToValue(valClasses, Map.class)); JsonNode fields = item.get("fields"); assertNotNull(fields); assertFalse(fields.isNull()); - assertTrue(meta.fields().equals(JSON_MAPPER.treeToValue(fields, Map.class))); + assertEquals(meta.fields(), JSON_MAPPER.treeToValue(fields, Map.class)); JsonNode indexesByType = item.get("indexes"); @@ -1493,7 +1390,7 @@ public void testTopology() throws Exception { JsonNode res = jsonResponse(ret); - assertEquals(GRID_CNT, res.size()); + assertEquals(gridCount(), res.size()); for (JsonNode node : res) { assertTrue(node.get("attributes").isNull()); @@ -1525,6 +1422,25 @@ public void testTopology() throws Exception { assertEquals(publicCache.getConfiguration(CacheConfiguration.class).getCacheMode(), cacheMode); } } + + // Test that caches 
not included. + ret = content(null, GridRestCommand.TOPOLOGY, + "attr", "false", + "mtr", "false", + "caches", "false" + ); + + info("Topology command result: " + ret); + + res = jsonResponse(ret); + + assertEquals(gridCount(), res.size()); + + for (JsonNode node : res) { + assertTrue(node.get("attributes").isNull()); + assertTrue(node.get("metrics").isNull()); + assertTrue(node.get("caches").isNull()); + } } /** @@ -1544,6 +1460,12 @@ public void testNode() throws Exception { assertTrue(res.get("attributes").isObject()); assertTrue(res.get("metrics").isObject()); + JsonNode caches = res.get("caches"); + + assertTrue(caches.isArray()); + assertFalse(caches.isNull()); + assertEquals(grid(0).context().cache().publicCaches().size(), caches.size()); + ret = content(null, GridRestCommand.NODE, "attr", "false", "mtr", "false", @@ -1569,6 +1491,22 @@ public void testNode() throws Exception { res = jsonResponse(ret); assertTrue(res.isNull()); + + // Check that caches not included. + ret = content(null, GridRestCommand.NODE, + "id", grid(0).localNode().id().toString(), + "attr", "false", + "mtr", "false", + "caches", "false" + ); + + info("Topology command result: " + ret); + + res = jsonResponse(ret); + + assertTrue(res.get("attributes").isNull()); + assertTrue(res.get("metrics").isNull()); + assertTrue(res.get("caches").isNull()); } /** @@ -2399,10 +2337,10 @@ public void testTypedPut() throws Exception { putTypedValue("timestamp", "2018-03-18%2001:01:01", "error", STATUS_FAILED); putTypedValue("timestamp", "error", "error", STATUS_FAILED); - IgniteCache cTimestamp = typedCache(); + IgniteCache cTs = typedCache(); - assertEquals(Timestamp.valueOf("2017-01-01 02:02:02"), cTimestamp.get(Timestamp.valueOf("2018-02-18 01:01:01"))); - assertEquals(Timestamp.valueOf("2018-05-05 05:05:05"), cTimestamp.get(Timestamp.valueOf("2018-01-01 01:01:01"))); + assertEquals(Timestamp.valueOf("2017-01-01 02:02:02"), cTs.get(Timestamp.valueOf("2018-02-18 01:01:01"))); + 
assertEquals(Timestamp.valueOf("2018-05-05 05:05:05"), cTs.get(Timestamp.valueOf("2018-01-01 01:01:01"))); // Test UUID type. UUID k1 = UUID.fromString("121f5ae8-148d-11e8-b642-0ed5f89f718b"); @@ -2598,19 +2536,13 @@ public void testTypedGet() throws Exception { getTypedValue("int", "888", PARTITIONED.toString()); } - /** - * @return Signature. - * @throws Exception If failed. - */ - protected abstract String signature() throws Exception; - /** * @return True if any query cursor is available. */ private boolean queryCursorFound() { boolean found = false; - for (int i = 0; i < GRID_CNT; ++i) { + for (int i = 0; i < gridCount(); ++i) { Map handlers = GridTestUtils.getFieldValue(grid(i).context().rest(), "handlers"); @@ -2755,6 +2687,19 @@ public CircularRef ref() { public void ref(CircularRef ref) { this.ref = ref; } + + /** {@inheritDoc} */ + @Override public String toString() { + SB sb = new SB(); + + sb.a('{') + .a('"').a("id").a('"').a(':').a(id).a(',') + .a('"').a("name").a('"').a(':').a('"').a(name).a('"').a(',') + .a('"').a("ref").a('"').a(':').a(ref) + .a('}'); + + return sb.toString(); + } } /** @@ -2891,7 +2836,7 @@ public VisorGatewayArgument(Class cls) { * @return This helper for chaining method calls. */ public VisorGatewayArgument forNode(ClusterNode node) { - put("p1", node.id().toString()); + put("p1", node != null ? 
node.id().toString() : null); return this; } @@ -3057,7 +3002,7 @@ private static String concat(Object[] vals, String delim) { DataRegionConfiguration drCfg = new DataRegionConfiguration(); drCfg.setName("testDataRegion"); - drCfg.setMaxSize(100 * 1024 * 1024); + drCfg.setMaxSize(100L * 1024 * 1024); dsCfg.setDefaultDataRegionConfiguration(drCfg); diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationSelfTest.java index ca62091a2ecee..27b8c03ff696d 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationSelfTest.java @@ -24,7 +24,6 @@ import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.processors.authentication.IgniteAccessControlException; import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; -import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.testframework.GridTestUtils; @@ -33,18 +32,12 @@ /** * Test REST with enabled authentication. 
*/ -public class JettyRestProcessorAuthenticationSelfTest extends JettyRestProcessorUnsignedSelfTest { +public abstract class JettyRestProcessorAuthenticationSelfTest extends JettyRestProcessorUnsignedSelfTest { /** */ - private static final String DFLT_LOGIN = "ignite"; + protected static final String DFLT_USER = "ignite"; /** */ - private static final String DFLT_PWD = "ignite"; - - /** */ - private String login = DFLT_LOGIN; - - /** */ - private String pwd = DFLT_PWD; + protected static final String DFLT_PWD = "ignite"; /** {@inheritDoc} */ @Override protected void beforeTestsStarted() throws Exception { @@ -54,11 +47,8 @@ public class JettyRestProcessorAuthenticationSelfTest extends JettyRestProcessor } /** {@inheritDoc} */ - @Override protected void beforeTest() throws Exception { - super.beforeTest(); - - login = DFLT_LOGIN; - pwd = DFLT_PWD; + @Override protected boolean securityEnabled() { + return true; } /** {@inheritDoc} */ @@ -97,32 +87,13 @@ public class JettyRestProcessorAuthenticationSelfTest extends JettyRestProcessor return cfg; } - /** {@inheritDoc} */ - @Override protected String restUrl() { - String url = super.restUrl(); - - if (!F.isEmpty(login)) { - url += "ignite.login=" + login; - - if (!F.isEmpty(pwd)) - url += "&ignite.password=" + pwd; - - url += '&'; - } - - return url; - } - /** * @throws Exception If failed. 
*/ - public void testMissingCredentials() throws Exception { - login = null; - pwd = null; - - String ret = content(null, GridRestCommand.VERSION); + public void testAuthenticationCommand() throws Exception { + String ret = content(null, GridRestCommand.AUTHENTICATE); - assertResponseContainsError(ret, "The user name or password is incorrect"); + assertResponseSucceeded(ret, false); } /** diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithCredsSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithCredsSelfTest.java new file mode 100644 index 0000000000000..c75e8a9797a1c --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithCredsSelfTest.java @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.rest; + +/** + * Test REST with enabled authentication and credentials in each request. 
+ */ +public class JettyRestProcessorAuthenticationWithCredsSelfTest extends JettyRestProcessorAuthenticationSelfTest { + /** {@inheritDoc} */ + @Override protected String restUrl() { + String url = super.restUrl(); + + url += "ignite.login=" + DFLT_USER + "&ignite.password=" + DFLT_PWD + "&"; + + return url; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithTokenSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithTokenSelfTest.java new file mode 100644 index 0000000000000..5c046af320394 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorAuthenticationWithTokenSelfTest.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.rest; + +import org.apache.ignite.internal.util.typedef.F; + +/** + * Test REST with enabled authentication and token. 
+ */ +public class JettyRestProcessorAuthenticationWithTokenSelfTest extends JettyRestProcessorAuthenticationSelfTest { + /** */ + private String tok = ""; + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + // Authenticate and extract token. + if (F.isEmpty(tok)) { + String ret = content(null, GridRestCommand.AUTHENTICATE, + "user", DFLT_USER, + "password", DFLT_PWD); + + int p1 = ret.indexOf("sessionToken"); + int p2 = ret.indexOf('"', p1 + 16); + + tok = ret.substring(p1 + 15, p2); + } + } + + /** {@inheritDoc} */ + @Override protected String restUrl() { + String url = super.restUrl(); + + if (!F.isEmpty(tok)) + url += "sessionToken=" + tok + "&"; + + return url; + } + + /** + * @throws Exception If failed. + */ + public void testInvalidSessionToken() throws Exception { + tok = null; + + String ret = content(null, GridRestCommand.VERSION); + + assertResponseContainsError(ret, "Failed to handle request - session token not found or invalid"); + + tok = "InvalidToken"; + + ret = content(null, GridRestCommand.VERSION); + + assertResponseContainsError(ret, "Failed to handle request - session token not found or invalid"); + + tok = "26BE027D32CC42329DEC92D517B44E9E"; + + ret = content(null, GridRestCommand.VERSION); + + assertResponseContainsError(ret, "Failed to handle request - unknown session token (maybe expired session)"); + + tok = null; // Cleanup token for next tests. 
+ } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java new file mode 100644 index 0000000000000..1b9328443d68d --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorCommonSelfTest.java @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.rest; + +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.LineNumberReader; +import java.net.URL; +import java.net.URLConnection; +import java.util.LinkedHashMap; +import java.util.Map; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; + +import org.apache.ignite.internal.processors.rest.protocols.http.jetty.GridJettyObjectMapper; +import org.apache.ignite.internal.util.typedef.internal.SB; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_JETTY_PORT; + +/** + * Base class for testing Jetty REST protocol. 
+ */ +public abstract class JettyRestProcessorCommonSelfTest extends AbstractRestProcessorSelfTest { + /** Grid count. */ + private static final int GRID_CNT = 3; + + /** REST port. */ + private static final int DFLT_REST_PORT = 8091; + + /** JSON to java mapper. */ + protected static final ObjectMapper JSON_MAPPER = new GridJettyObjectMapper(); + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + System.setProperty(IGNITE_JETTY_PORT, Integer.toString(restPort())); + + super.beforeTestsStarted(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + super.afterTestsStopped(); + + System.clearProperty(IGNITE_JETTY_PORT); + } + + /** {@inheritDoc} */ + @Override protected int gridCount() { + return GRID_CNT; + } + + /** + * @return Port to use for rest. Needs to be changed over time because Jetty has some delay before port unbind. + */ + protected int restPort() { + return DFLT_REST_PORT; + } + + /** + * @return Test URL + */ + protected String restUrl() { + return "http://" + LOC_HOST + ":" + restPort() + "/ignite?"; + } + + /** + * @return Security enabled flag. Should be the same with {@code ctx.security().enabled()}. + */ + protected boolean securityEnabled() { + return false; + } + + /** + * @return Signature. + * @throws Exception If failed. + */ + protected abstract String signature() throws Exception; + + /** + * Execute REST command and return result. + * + * @param params Command parameters. + * @return Returned content. + * @throws Exception If failed. 
+ */ + protected String content(Map params) throws Exception { + SB sb = new SB(restUrl()); + + for (Map.Entry e : params.entrySet()) + sb.a(e.getKey()).a('=').a(e.getValue()).a('&'); + + URL url = new URL(sb.toString()); + + URLConnection conn = url.openConnection(); + + String signature = signature(); + + if (signature != null) + conn.setRequestProperty("X-Signature", signature); + + InputStream in = conn.getInputStream(); + + StringBuilder buf = new StringBuilder(256); + + try (LineNumberReader rdr = new LineNumberReader(new InputStreamReader(in, "UTF-8"))) { + for (String line = rdr.readLine(); line != null; line = rdr.readLine()) + buf.append(line); + } + + return buf.toString(); + } + + /** + * @param cacheName Optional cache name. + * @param cmd REST command. + * @param params Command parameters. + * @return Returned content. + * @throws Exception If failed. + */ + protected String content(String cacheName, GridRestCommand cmd, String... params) throws Exception { + Map paramsMap = new LinkedHashMap<>(); + + if (cacheName != null) + paramsMap.put("cacheName", cacheName); + + paramsMap.put("cmd", cmd.key()); + + if (params != null) { + assertEquals(0, params.length % 2); + + for (int i = 0; i < params.length; i += 2) + paramsMap.put(params[i], params[i + 1]); + } + + return content(paramsMap); + } + + /** + * @param json JSON content. + * @param field Field name in JSON object. + * @return Field value. + * @throws IOException If failed. 
+ */ + protected String jsonField(String json, String field) throws IOException { + assertNotNull(json); + assertFalse(json.isEmpty()); + + JsonNode node = JSON_MAPPER.readTree(json); + + JsonNode fld = node.get(field); + + assertNotNull(fld); + + return fld.asText(); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java new file mode 100644 index 0000000000000..521d7c18e9916 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorGetAllAsArrayTest.java @@ -0,0 +1,84 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.rest; + +import com.fasterxml.jackson.databind.JsonNode; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import org.apache.ignite.internal.util.typedef.F; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY; +import static org.apache.ignite.internal.processors.rest.GridRestResponse.STATUS_SUCCESS; + +/** */ +public class JettyRestProcessorGetAllAsArrayTest extends JettyRestProcessorCommonSelfTest { + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + System.setProperty(IGNITE_REST_GETALL_AS_ARRAY, "true"); + + super.beforeTestsStarted(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + super.afterTestsStopped(); + + System.clearProperty(IGNITE_REST_GETALL_AS_ARRAY); + } + + /** + * @throws Exception If failed. + */ + public void testGetAll() throws Exception { + final Map entries = F.asMap("getKey1", "getVal1", "getKey2", "getVal2"); + + jcache().putAll(entries); + + String ret = content(DEFAULT_CACHE_NAME, GridRestCommand.CACHE_GET_ALL, + "k1", "getKey1", + "k2", "getKey2" + ); + + info("Get all command result: " + ret); + + assertNotNull(ret); + assertFalse(ret.isEmpty()); + + JsonNode node = JSON_MAPPER.readTree(ret); + + assertEquals(STATUS_SUCCESS, node.get("successStatus").asInt()); + assertTrue(node.get("error").isNull()); + + JsonNode res = node.get("response"); + + assertTrue(res.isArray()); + + Set> returnValue = new HashSet<>(); + + returnValue.add(F.asMap("key", "getKey1", "value", "getVal1")); + returnValue.add(F.asMap("key", "getKey2", "value", "getVal2")); + + assertEquals(returnValue, JSON_MAPPER.treeToValue(res, Set.class)); + } + + /** {@inheritDoc} */ + @Override protected String signature() { + return null; + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorSignedSelfTest.java 
b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorSignedSelfTest.java index 00e4c68a9e951..3be99b489eef3 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorSignedSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorSignedSelfTest.java @@ -82,11 +82,11 @@ public void testUnauthorized() throws Exception { @Override protected String signature() throws Exception { long ts = U.currentTimeMillis(); - String s = ts + ":" + REST_SECRET_KEY; - try { MessageDigest md = MessageDigest.getInstance("SHA-1"); + String s = ts + ":" + REST_SECRET_KEY; + md.update(s.getBytes()); String hash = Base64.getEncoder().encodeToString(md.digest()); @@ -97,4 +97,4 @@ public void testUnauthorized() throws Exception { throw new Exception("Failed to create authentication signature.", e); } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorUnsignedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorUnsignedSelfTest.java index 988cedf38fe42..c7ff0d27ae7db 100644 --- a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorUnsignedSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/JettyRestProcessorUnsignedSelfTest.java @@ -18,16 +18,11 @@ package org.apache.ignite.internal.processors.rest; /** - * + * Unsigned REST tests. 
*/ public class JettyRestProcessorUnsignedSelfTest extends JettyRestProcessorAbstractSelfTest { /** {@inheritDoc} */ - @Override protected int restPort() { - return 8091; - } - - /** {@inheritDoc} */ - @Override protected String signature() throws Exception { + @Override protected String signature() { return null; } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/TcpRestUnmarshalVulnerabilityTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/TcpRestUnmarshalVulnerabilityTest.java new file mode 100644 index 0000000000000..92d824be329db --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/TcpRestUnmarshalVulnerabilityTest.java @@ -0,0 +1,269 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.rest; + +import java.io.BufferedInputStream; +import java.io.BufferedOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.ObjectInputStream; +import java.io.OutputStream; +import java.net.InetAddress; +import java.net.Socket; +import java.nio.ByteBuffer; +import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; +import org.apache.ignite.configuration.ConnectorConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.client.marshaller.jdk.GridClientJdkMarshaller; +import org.apache.ignite.internal.processors.rest.client.message.GridClientHandshakeRequest; +import org.apache.ignite.internal.processors.rest.client.message.GridClientMessage; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_MARSHALLER_BLACKLIST; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_MARSHALLER_WHITELIST; +import static org.apache.ignite.internal.processors.rest.protocols.tcp.GridMemcachedMessage.IGNITE_HANDSHAKE_FLAG; +import static org.apache.ignite.internal.processors.rest.protocols.tcp.GridMemcachedMessage.IGNITE_REQ_FLAG; + +/** + * Tests for whitelist and blacklist ot avoiding deserialization vulnerability. + */ +public class TcpRestUnmarshalVulnerabilityTest extends GridCommonAbstractTest { + /** Marshaller. */ + private static final GridClientJdkMarshaller MARSH = new GridClientJdkMarshaller(); + + /** Shared value. */ + private static final AtomicBoolean SHARED = new AtomicBoolean(); + + /** Port. */ + private static int port; + + /** Host. 
*/ + private static String host; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + ConnectorConfiguration connCfg = new ConnectorConfiguration(); + + port = connCfg.getPort(); + host = connCfg.getHost(); + + cfg.setConnectorConfiguration(connCfg); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + SHARED.set(false); + + System.clearProperty(IGNITE_MARSHALLER_WHITELIST); + System.clearProperty(IGNITE_MARSHALLER_BLACKLIST); + + IgniteUtils.clearClassCache(); + } + + /** + * @throws Exception If failed. + */ + public void testNoLists() throws Exception { + testExploit(true); + } + + /** + * @throws Exception If failed. + */ + public void testWhiteListIncluded() throws Exception { + String path = U.resolveIgnitePath("modules/core/src/test/config/class_list_exploit_included.txt").getPath(); + + System.setProperty(IGNITE_MARSHALLER_WHITELIST, path); + + testExploit(true); + } + + /** + * @throws Exception If failed. + */ + public void testWhiteListExcluded() throws Exception { + String path = U.resolveIgnitePath("modules/core/src/test/config/class_list_exploit_excluded.txt").getPath(); + + System.setProperty(IGNITE_MARSHALLER_WHITELIST, path); + + testExploit(false); + } + + /** + * @throws Exception If failed. + */ + public void testBlackListIncluded() throws Exception { + String path = U.resolveIgnitePath("modules/core/src/test/config/class_list_exploit_included.txt").getPath(); + + System.setProperty(IGNITE_MARSHALLER_BLACKLIST, path); + + testExploit(false); + } + + /** + * @throws Exception If failed. 
+ */ + public void testBlackListExcluded() throws Exception { + String path = U.resolveIgnitePath("modules/core/src/test/config/class_list_exploit_excluded.txt").getPath(); + + System.setProperty(IGNITE_MARSHALLER_BLACKLIST, path); + + testExploit(true); + } + + /** + * @throws Exception If failed. + */ + public void testBothListIncluded() throws Exception { + String path = U.resolveIgnitePath("modules/core/src/test/config/class_list_exploit_included.txt").getPath(); + + System.setProperty(IGNITE_MARSHALLER_WHITELIST, path); + System.setProperty(IGNITE_MARSHALLER_BLACKLIST, path); + + testExploit(false); + } + + /** + * @param positive Positive. + */ + private void testExploit(boolean positive) throws Exception { + try { + startGrid(); + + attack(marshal(new Exploit()).array()); + + boolean res = GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + return SHARED.get(); + } + }, 3000L); + + if (positive) + assertTrue(res); + else + assertFalse(res); + } + finally { + stopAllGrids(); + } + } + + /** + * @param obj Object. + */ + private static ByteBuffer marshal(Object obj) throws IOException { + return MARSH.marshal(obj, 0); + } + + /** + * @param data Data. + */ + private void attack(byte[] data) throws IOException { + InetAddress addr = InetAddress.getByName(host); + + try ( + Socket sock = new Socket(addr, port); + OutputStream os = new BufferedOutputStream(sock.getOutputStream()) + ) { + // Handshake request. + os.write(IGNITE_HANDSHAKE_FLAG); + + GridClientHandshakeRequest req = new GridClientHandshakeRequest(); + req.marshallerId(GridClientJdkMarshaller.ID); + os.write(req.rawBytes()); + os.flush(); + + // Handshake response + InputStream is = new BufferedInputStream(sock.getInputStream()); + + is.read(new byte[146]); // Read handshake response. + + int len = data.length + 40; + + os.write(IGNITE_REQ_FLAG); // Package type. + os.write((byte)(len >> 24)); // Package length. 
+ os.write((byte)(len >> 16)); + os.write((byte)(len >> 8)); + os.write((byte)(len)); + os.write(new byte[40]); // Stream header. + os.write(data); // Exploit. + os.flush(); + } + } + + /** */ + private static class Exploit implements GridClientMessage { + /** + * @param is Input stream. + */ + private void readObject(ObjectInputStream is) throws ClassNotFoundException, IOException { + SHARED.set(true); + } + + /** {@inheritDoc} */ + @Override public long requestId() { + return 0; + } + + /** {@inheritDoc} */ + @Override public void requestId(long reqId) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public UUID clientId() { + return null; + } + + /** {@inheritDoc} */ + @Override public void clientId(UUID id) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public UUID destinationId() { + return null; + } + + /** {@inheritDoc} */ + @Override public void destinationId(UUID id) { + // No-op. + } + + /** {@inheritDoc} */ + @Override public byte[] sessionToken() { + return new byte[0]; + } + + /** {@inheritDoc} */ + @Override public void sessionToken(byte[] sesTok) { + // No-op. + } + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java new file mode 100644 index 0000000000000..f892ca5c1cc98 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/internal/processors/rest/protocols/tcp/redis/RedisProtocolGetAllAsArrayTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.rest.protocols.tcp.redis; + +import org.apache.ignite.IgniteSystemProperties; + +/** + * Test for being unaffected by {@link IgniteSystemProperties#IGNITE_REST_GETALL_AS_ARRAY}. + */ +public class RedisProtocolGetAllAsArrayTest extends RedisProtocolStringSelfTest { + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + System.setProperty(IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY, "true"); + + super.beforeTestsStarted(); + } + + /** {@inheritDoc} */ + @Override protected void afterTestsStopped() throws Exception { + super.afterTestsStopped(); + + System.clearProperty(IgniteSystemProperties.IGNITE_REST_GETALL_AS_ARRAY); + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java index 20594083ce82e..5e0350e61f734 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/JdbcErrorsAbstractSelfTest.java @@ -267,10 +267,10 @@ public void testInvalidIntFormat() throws SQLException { rs.next(); - rs.getLong(1); + rs.getInt(1); } } - }, "0700B", "Cannot convert to long"); + }, "0700B", "Cannot convert to int"); } /** diff --git 
a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java index a88ebe820e557..eb10c20f40ff5 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/suite/IgniteJdbcDriverTestSuite.java @@ -42,6 +42,7 @@ import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedNearSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalPartitionedSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinBulkLoadTransactionalReplicatedSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlCustomSchemaSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinComplexQuerySelfTest; @@ -65,12 +66,14 @@ import org.apache.ignite.jdbc.thin.JdbcThinMetadataSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinMissingLongArrayResultsTest; import org.apache.ignite.jdbc.thin.JdbcThinNoDefaultSchemaTest; +import org.apache.ignite.jdbc.thin.JdbcThinPreparedStatementLeakTest; import org.apache.ignite.jdbc.thin.JdbcThinPreparedStatementSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinResultSetSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinSchemaCaseTest; import org.apache.ignite.jdbc.thin.JdbcThinSelectAfterAlterTable; import org.apache.ignite.jdbc.thin.JdbcThinStatementSelfTest; -import org.apache.ignite.jdbc.thin.JdbcThinStreamingSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinStreamingNotOrderedSelfTest; +import org.apache.ignite.jdbc.thin.JdbcThinStreamingOrderedSelfTest; import org.apache.ignite.jdbc.thin.JdbcThinTcpIoTest; import org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSelfTest; import 
org.apache.ignite.jdbc.thin.JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest; @@ -128,7 +131,8 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcBlobTest.class)); suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest.class)); - suite.addTest(new TestSuite(JdbcThinStreamingSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinStreamingNotOrderedSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinStreamingOrderedSelfTest.class)); // DDL tests. suite.addTest(new TestSuite(org.apache.ignite.internal.jdbc2.JdbcDynamicIndexAtomicPartitionedNearSelfTest.class)); @@ -188,6 +192,7 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcThinUpdateStatementSkipReducerOnUpdateSelfTest.class)); suite.addTest(new TestSuite(JdbcThinMergeStatementSkipReducerOnUpdateSelfTest.class)); suite.addTest(new TestSuite(JdbcThinComplexDmlDdlSkipReducerOnUpdateSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinComplexDmlDdlCustomSchemaSelfTest.class)); suite.addTest(new TestSuite(JdbcThinLocalQueriesSelfTest.class)); @@ -195,6 +200,8 @@ public static TestSuite suite() throws Exception { suite.addTest(new TestSuite(JdbcThinWalModeChangeSelfTest.class)); suite.addTest(new TestSuite(JdbcThinAuthenticateConnectionSelfTest.class)); + suite.addTest(new TestSuite(JdbcThinPreparedStatementLeakTest.class)); + return suite; } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java index 178cd3afba9d4..7ac96990a01eb 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAbstractSelfTest.java @@ -28,15 +28,13 @@ import java.util.Collection; import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; -import 
org.apache.ignite.IgniteException; import org.apache.ignite.internal.IgniteEx; import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; import org.apache.ignite.internal.processors.port.GridPortRecord; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.testframework.GridTestUtils; +import org.apache.ignite.testframework.GridTestUtils.RunnableX; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; -import org.jetbrains.annotations.Nullable; /** * Connection test. @@ -47,27 +45,18 @@ public class JdbcThinAbstractSelfTest extends GridCommonAbstractTest { * @param r Runnable to check support. */ protected void checkNotSupported(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); - - return null; - } - }, SQLFeatureNotSupportedException.class, null); + GridTestUtils.assertThrowsWithCause(r, SQLFeatureNotSupportedException.class); } /** * @param r Runnable to check on closed connection. */ protected void checkConnectionClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Connection is closed"); } @@ -75,13 +64,11 @@ protected void checkConnectionClosed(final RunnableX r) { * @param r Runnable to check on closed statement. */ protected void checkStatementClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Statement is closed"); } @@ -89,26 +76,14 @@ protected void checkStatementClosed(final RunnableX r) { * @param r Runnable to check on closed result set. 
*/ protected void checkResultSetClosed(final RunnableX r) { - GridTestUtils.assertThrows(log, - new Callable() { - @Override public Object call() throws Exception { - r.run(); + GridTestUtils.assertThrowsAnyCause(log, + () -> { + r.run(); - return null; - } + return null; }, SQLException.class, "Result set is closed"); } - /** - * Runnable that can throw an exception. - */ - interface RunnableX { - /** - * @throws Exception On error. - */ - void run() throws Exception; - } - /** * @param node Node to connect to. * @param params Connection parameters. diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAuthenticateConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAuthenticateConnectionSelfTest.java index 97218fb09162e..cb4d7f3cf70c9 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAuthenticateConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinAuthenticateConnectionSelfTest.java @@ -60,7 +60,10 @@ public class JdbcThinAuthenticateConnectionSelfTest extends JdbcThinAbstractSelf cfg.setDataStorageConfiguration(new DataStorageConfiguration() .setDefaultDataRegionConfiguration(new DataRegionConfiguration() - .setPersistenceEnabled(true))); + .setPersistenceEnabled(true) + .setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE) + ) + ); return cfg; } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java index e9cb8325a1ecf..2a4c7995fbdf6 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinBulkLoadAbstractSelfTest.java @@ -22,9 +22,16 @@ import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; 
import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.internal.processors.bulkload.BulkLoadCsvFormat; +import org.apache.ignite.internal.processors.bulkload.BulkLoadCsvParser; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.lang.IgniteClosure; import org.apache.ignite.testframework.GridTestUtils; +import java.nio.ByteBuffer; +import java.nio.charset.Charset; +import java.nio.charset.CodingErrorAction; +import java.nio.charset.UnsupportedCharsetException; import java.sql.BatchUpdateException; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -35,7 +42,6 @@ import java.util.Objects; import java.util.concurrent.Callable; -import static org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK; import static org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; import static org.apache.ignite.internal.util.IgniteUtils.resolveIgnitePath; @@ -47,9 +53,13 @@ public abstract class JdbcThinBulkLoadAbstractSelfTest extends JdbcThinAbstractD /** Subdirectory with CSV files */ private static final String CSV_FILE_SUBDIR = "/modules/clients/src/test/resources/"; + /** Default table name. */ + private static final String TBL_NAME = "Person"; + /** A CSV file with zero records */ private static final String BULKLOAD_EMPTY_CSV_FILE = - Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload0.csv")).getAbsolutePath(); + Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload0.csv")) + .getAbsolutePath(); /** A CSV file with one record. */ private static final String BULKLOAD_ONE_LINE_CSV_FILE = @@ -60,18 +70,19 @@ public abstract class JdbcThinBulkLoadAbstractSelfTest extends JdbcThinAbstractD Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload2.csv")).getAbsolutePath(); /** A CSV file in UTF-8. 
*/ - private static final String BULKLOAD_UTF_CSV_FILE = - Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload2_utf.csv")).getAbsolutePath(); + private static final String BULKLOAD_UTF8_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload2_utf8.csv")).getAbsolutePath(); - /** Default table name. */ - private static final String TBL_NAME = "Person"; + /** A CSV file in windows-1251. */ + private static final String BULKLOAD_CP1251_CSV_FILE = + Objects.requireNonNull(resolveIgnitePath(CSV_FILE_SUBDIR + "bulkload2_windows1251.csv")).getAbsolutePath(); /** Basic COPY statement used in majority of the tests. */ public static final String BASIC_SQL_COPY_STMT = - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\"" + - " into " + TBL_NAME + - " (_key, age, firstName, lastName)" + - " format csv"; + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "'" + + " into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"; /** JDBC statement. */ private Statement stmt; @@ -161,8 +172,6 @@ private CacheConfiguration cacheConfigWithQueryEntity() { @Override protected void beforeTest() throws Exception { super.beforeTest(); - System.setProperty(IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK, "TRUE"); - stmt = conn.createStatement(); assertNotNull(stmt); @@ -176,8 +185,6 @@ private CacheConfiguration cacheConfigWithQueryEntity() { assertTrue(stmt.isClosed()); - System.clearProperty(IGNITE_SQL_PARSER_DISABLE_H2_FALLBACK); - super.afterTest(); } @@ -203,9 +210,9 @@ public void testBasicStatement() throws SQLException { */ public void testEmptyFile() throws SQLException { int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_EMPTY_CSV_FILE + "\" into " + TBL_NAME + - " (_key, age, firstName, lastName)" + - " format csv"); + "copy from '" + BULKLOAD_EMPTY_CSV_FILE + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); assertEquals(0, updatesCnt); @@ -219,35 +226,161 @@ public void testEmptyFile() 
throws SQLException { */ public void testOneLineFile() throws SQLException { int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_ONE_LINE_CSV_FILE + "\" into " + TBL_NAME + - " (_key, age, firstName, lastName)" + - " format csv"); + "copy from '" + BULKLOAD_ONE_LINE_CSV_FILE + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); assertEquals(1, updatesCnt); checkCacheContents(TBL_NAME, true, 1); } + /** + * Verifies that error is reported for empty charset name. + */ + public void testEmptyCharset() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from 'any.file' into Person " + + "(_key, age, firstName, lastName) " + + "format csv charset ''"); + + return null; + } + }, SQLException.class, "Unknown charset name: ''"); + } + + /** + * Verifies that error is reported for unsupported charset name. + */ + public void testNotSupportedCharset() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from 'any.file' into Person " + + "(_key, age, firstName, lastName) " + + "format csv charset 'nonexistent'"); + + return null; + } + }, SQLException.class, "Charset is not supported: 'nonexistent'"); + } + + /** + * Verifies that error is reported for unknown charset name. + */ + public void testUnknownCharset() { + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt.executeUpdate( + "copy from 'any.file' into Person " + + "(_key, age, firstName, lastName) " + + "format csv charset '8^)\'"); + + return null; + } + }, SQLException.class, "Unknown charset name: '8^)'"); + } + + /** + * Verifies that ASCII encoding is recognized and imported. + * + * @throws SQLException If failed. 
+ */ + public void testAsciiCharset() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "'" + + " into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv charset 'ascii'"); + + assertEquals(2, updatesCnt); + + checkCacheContents(TBL_NAME, true, 2); + } + /** * Imports two-entry CSV file with UTF-8 characters into a table and checks * the created entries using SELECT statement. * * @throws SQLException If failed. */ - public void testUtf() throws SQLException { + public void testUtf8Charset() throws SQLException { + checkBulkLoadWithCharset(BULKLOAD_UTF8_CSV_FILE, "utf-8"); + } + + /** + * Verifies that ASCII encoding is recognized and imported. + * + * @throws SQLException If failed. + */ + public void testWin1251Charset() throws SQLException { + checkBulkLoadWithCharset(BULKLOAD_CP1251_CSV_FILE, "windows-1251"); + } + + /** + * Bulk-loads specified file specifying charset in the command + * and verifies the entries imported. + * + * @param fileName CSV file to load. + * @param charsetName Charset name to specify in the command. + * @throws SQLException If failed. + */ + private void checkBulkLoadWithCharset(String fileName, String charsetName) throws SQLException { int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + - " (_key, age, firstName, lastName)" + - " format csv"); + "copy from '" + fileName + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv charset '" + charsetName + "'"); assertEquals(2, updatesCnt); - checkUtfCacheContents(TBL_NAME, true, 2); + checkNationalCacheContents(TBL_NAME); + } + + /** + * Verifies that no error is reported and characters are converted improperly when we import + * UTF-8 as windows-1251. + * + * @throws SQLException If failed. 
+ */ + public void testWrongCharset_Utf8AsWin1251() throws SQLException { + checkBulkLoadWithWrongCharset(BULKLOAD_UTF8_CSV_FILE, "UTF-8", "windows-1251"); + } + + /** + * Verifies that no error is reported and characters are converted improperly when we import + * windows-1251 as UTF-8. + * + * @throws SQLException If failed. + */ + public void testWrongCharset_Win1251AsUtf8() throws SQLException { + checkBulkLoadWithWrongCharset(BULKLOAD_CP1251_CSV_FILE, "windows-1251", "UTF-8"); + } + + /** + * Verifies that no error is reported and characters are converted improperly when we import + * UTF-8 as ASCII. + * + * @throws SQLException If failed. + */ + public void testWrongCharset_Utf8AsAscii() throws SQLException { + checkBulkLoadWithWrongCharset(BULKLOAD_UTF8_CSV_FILE, "UTF-8", "ascii"); + } + + /** + * Verifies that no error is reported and characters are converted improperly when we import + * windows-1251 as ASCII. + * + * @throws SQLException If failed. + */ + public void testWrongCharset_Win1251AsAscii() throws SQLException { + checkBulkLoadWithWrongCharset(BULKLOAD_CP1251_CSV_FILE, "windows-1251", "ascii"); } /** - * Checks that bulk load works when we use batch size of 1 byte and thus - * create multiple batches per COPY. + * Checks that bulk load works when we use packet size of 1 byte and thus + * create multiple packets per COPY. * * @throws SQLException If failed. */ @@ -259,22 +392,39 @@ public void testPacketSize_1() throws SQLException { checkCacheContents(TBL_NAME, true, 2); } + /** + * Imports two-entry CSV file with UTF-8 characters into a table and checks + * the created entries using SELECT statement. + * + * @throws SQLException If failed. 
+ */ + public void testDefaultCharset() throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from '" + BULKLOAD_UTF8_CSV_FILE + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv"); + + assertEquals(2, updatesCnt); + + checkNationalCacheContents(TBL_NAME); + } + /** * Imports two-entry CSV file with UTF-8 characters into a table using packet size of one byte - * (thus splitting each two-byte UTF-8 character into two batches) + * (thus splitting each two-byte UTF-8 character into two packets) * and checks the created entries using SELECT statement. * * @throws SQLException If failed. */ - public void testUtfPacketSize_1() throws SQLException { + public void testDefaultCharsetPacketSize1() throws SQLException { int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + - " (_key, age, firstName, lastName)" + - " format csv packet_size 1"); + "copy from '" + BULKLOAD_UTF8_CSV_FILE + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv packet_size 1"); assertEquals(2, updatesCnt); - checkUtfCacheContents(TBL_NAME, true, 2); + checkNationalCacheContents(TBL_NAME); } /** @@ -284,9 +434,9 @@ public void testWrongFileName() { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.executeUpdate( - "copy from \"nonexistent\" into Person" + - " (_key, age, firstName, lastName)" + - " format csv"); + "copy from 'nonexistent' into Person" + + " (_key, age, firstName, lastName)" + + " format csv"); return null; } @@ -300,9 +450,9 @@ public void testMissingTable() { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.executeUpdate( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Peterson" + - " (_key, age, firstName, lastName)" + - " format csv"); + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "' into Peterson" + + " (_key, age, firstName, lastName)" + + " 
format csv"); return null; } @@ -316,9 +466,9 @@ public void testWrongColumnName() { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.executeUpdate( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Person" + - " (_key, age, firstName, lostName)" + - " format csv"); + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "' into Person" + + " (_key, age, firstName, lostName)" + + " format csv"); return null; } @@ -332,9 +482,9 @@ public void testWrongColumnType() { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.executeUpdate( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into Person" + - " (_key, firstName, age, lastName)" + - " format csv"); + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "' into Person" + + " (_key, firstName, age, lastName)" + + " format csv"); return null; } @@ -348,9 +498,10 @@ public void testWrongColumnType() { */ public void testFieldsSubset() throws SQLException { int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + TBL_NAME + - " (_key, age, firstName)" + - " format csv"); + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "'" + + " into " + TBL_NAME + + " (_key, age, firstName)" + + " format csv"); assertEquals(2, updatesCnt); @@ -372,9 +523,9 @@ public void testCreateAndBulkLoadTable() throws SQLException { " (id int primary key, age int, firstName varchar(30), lastName varchar(30))"); int updatesCnt = stmt.executeUpdate( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + tblName + - "(_key, age, firstName, lastName)" + - " format csv"); + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "' into " + tblName + + "(_key, age, firstName, lastName)" + + " format csv"); assertEquals(2, updatesCnt); @@ -401,18 +552,20 @@ public void testConfigureQueryEntityAndBulkLoad() throws SQLException { } /** - * Verifies exception thrown if COPY is added into a batch. 
+ * Verifies exception thrown if COPY is added into a packet. + * + * @throws SQLException If failed. */ - public void testMultipleStatement() { + public void testMultipleStatement() throws SQLException { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.addBatch(BASIC_SQL_COPY_STMT); - stmt.addBatch("copy from \"" + BULKLOAD_ONE_LINE_CSV_FILE + "\" into " + TBL_NAME + + stmt.addBatch("copy from '" + BULKLOAD_ONE_LINE_CSV_FILE + "' into " + TBL_NAME + " (_key, age, firstName, lastName)" + " format csv"); - stmt.addBatch("copy from \"" + BULKLOAD_UTF_CSV_FILE + "\" into " + TBL_NAME + + stmt.addBatch("copy from '" + BULKLOAD_UTF8_CSV_FILE + "' into " + TBL_NAME + " (_key, age, firstName, lastName)" + " format csv"); @@ -425,8 +578,10 @@ public void testMultipleStatement() { /** * Verifies that COPY command is rejected by Statement.executeQuery(). + * + * @throws SQLException If failed. */ - public void testExecuteQuery() { + public void testExecuteQuery() throws SQLException { GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { stmt.executeQuery(BASIC_SQL_COPY_STMT); @@ -466,22 +621,24 @@ public void testPreparedStatementWithExecuteUpdate() throws SQLException { /** * Verifies that COPY command reports an error when used with PreparedStatement parameter. + * + * @throws SQLException If failed. 
*/ - public void testPreparedStatementWithParameter() { + public void testPreparedStatementWithParameter() throws SQLException { GridTestUtils.assertThrows(log, new Callable() { - @Override public Object call() throws Exception { - PreparedStatement pstmt = conn.prepareStatement( - "copy from \"" + BULKLOAD_TWO_LINES_CSV_FILE + "\" into " + TBL_NAME + + @Override public Object call() throws Exception { + PreparedStatement pstmt = conn.prepareStatement( + "copy from '" + BULKLOAD_TWO_LINES_CSV_FILE + "' into " + TBL_NAME + " (_key, age, firstName, lastName)" + " format ?"); - pstmt.setString(1, "csv"); + pstmt.setString(1, "csv"); - pstmt.executeUpdate(); + pstmt.executeUpdate(); - return null; - } - }, SQLException.class, "Unexpected token: \"?\" (expected: \"[identifier]\""); + return null; + } + }, SQLException.class, "Unexpected token: \"?\" (expected: \"[identifier]\""); } /** @@ -515,7 +672,9 @@ public void testPreparedStatementWithExecuteQuery() { } /** - * Checks cache contents for a typical test using SQL SELECT command. + * Checks cache contents after bulk loading data in the above tests: ASCII version. + *

+ * Uses SQL SELECT command for querying entries. * * @param tblName Table name to query. * @param checkLastName Check 'lastName' column (not imported in some tests). @@ -554,18 +713,47 @@ else if (id == 456) { } /** - * Checks cache contents for a UTF-8 bulk load tests using SQL SELECT command. + * Checks cache contents after bulk loading data in the above tests: + * national charset version. + *

+ * Uses SQL SELECT command for querying entries. * * @param tblName Table name to query. - * @param checkLastName Check 'lastName' column (not imported in some tests). - * @param recCnt Number of records to expect. * @throws SQLException When one of checks has failed. */ - private void checkUtfCacheContents(String tblName, boolean checkLastName, int recCnt) throws SQLException { + private void checkNationalCacheContents(String tblName) throws SQLException { + checkRecodedNationalCacheContents(tblName, null, null); + } + + /** + * Checks cache contents after bulk loading data in the tests: + * normal and erroneously recoded national charset version. + *

+ * Uses SQL SELECT command for querying entries. + * + * @param tblName Table name to query. + * @param csvCharsetName Either null or the charset used in CSV file + * Note that the both {@code csvCharsetName} and {@code stmtCharsetName} should be either null or non-null. + * @param stmtCharsetName Either null or the charset specified in COPY statement. + * @throws SQLException When one of checks has failed. + */ + private void checkRecodedNationalCacheContents(String tblName, + String csvCharsetName, String stmtCharsetName) throws SQLException { + assert (csvCharsetName != null) == (stmtCharsetName != null); + ResultSet rs = stmt.executeQuery("select _key, age, firstName, lastName from " + tblName); assert rs != null; + IgniteClosure recoder = + (csvCharsetName != null) + ? new WrongCharsetRecoder(csvCharsetName, stmtCharsetName) + : new IgniteClosure() { + @Override public String apply(String input) { + return input; + } + }; + int cnt = 0; while (rs.next()) { @@ -573,15 +761,17 @@ private void checkUtfCacheContents(String tblName, boolean checkLastName, int re if (id == 123) { assertEquals(12, rs.getInt("age")); - assertEquals("Имя123 Отчество123", rs.getString("firstName")); - if (checkLastName) - assertEquals("Фамилия123", rs.getString("lastName")); + + assertEquals(recoder.apply("Имя123 Отчество123"), rs.getString("firstName")); + + assertEquals(recoder.apply("Фамилия123"), rs.getString("lastName")); } else if (id == 456) { assertEquals(45, rs.getInt("age")); - assertEquals("Имя456", rs.getString("firstName")); - if (checkLastName) - assertEquals("Фамилия456", rs.getString("lastName")); + + assertEquals(recoder.apply("Имя456"), rs.getString("firstName")); + + assertEquals(recoder.apply("Фамилия456"), rs.getString("lastName")); } else fail("Wrong ID: " + id); @@ -589,6 +779,70 @@ else if (id == 456) { cnt++; } - assertEquals(recCnt, cnt); + assertEquals(2, cnt); + } + + /** + * Checks that no error is reported and characters are converted improperly when we 
import + * file having a different charset than the one specified in the SQL statement. + * + * @param csvFileName Imported file name. + * @param csvCharsetName Imported file charset. + * @param stmtCharsetName Charset to specify in the SQL statement. + * @throws SQLException If failed. + */ + private void checkBulkLoadWithWrongCharset(String csvFileName, String csvCharsetName, String stmtCharsetName) + throws SQLException { + int updatesCnt = stmt.executeUpdate( + "copy from '" + csvFileName + "' into " + TBL_NAME + + " (_key, age, firstName, lastName)" + + " format csv charset '" + stmtCharsetName + "'"); + + assertEquals(2, updatesCnt); + + checkRecodedNationalCacheContents(TBL_NAME, csvCharsetName, stmtCharsetName); + } + + /** + * Recodes an input string as if it was encoded in one charset and was read using + * another charset using {@link CodingErrorAction#REPLACE} settings for + * unmappable and malformed characters. + */ + private static class WrongCharsetRecoder implements IgniteClosure { + /** Charset in which the string we are reading is actually encoded. */ + private final Charset actualCharset; + + /** Charset which we use to read the string. */ + private final Charset appliedCharset; + + /** + * Creates the recoder. + * + * @param actualCharset Charset in which the string we are reading is actually encoded. + * @param appliedCharset Charset which we use to read the string. + * @throws UnsupportedCharsetException if the charset name is wrong. + */ + WrongCharsetRecoder(String actualCharset, String appliedCharset) { + this.actualCharset = Charset.forName(actualCharset); + this.appliedCharset = Charset.forName(appliedCharset); + } + + /** + * Converts string as it was read using a wrong charset. + *

+ * First the method converts the string into {@link #actualCharset} and puts bytes into a buffer. + * Then it tries to read these bytes from the buffer using {@link #appliedCharset} and + * {@link CodingErrorAction#REPLACE} settings for unmappable and malformed characters + * (NB: these settings implicitly come from {@link Charset#decode(ByteBuffer)} implementation, while + * being explicitly set in {@link BulkLoadCsvParser#BulkLoadCsvParser(BulkLoadCsvFormat)}). + * + * @param input The input string (in Java encoding). + * @return The converted string. + */ + @Override public String apply(String input) { + ByteBuffer encodedBuf = actualCharset.encode(input); + + return appliedCharset.decode(encodedBuf).toString(); + } } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java new file mode 100644 index 0000000000000..8fd9356533be5 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlCustomSchemaSelfTest.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.configuration.IgniteConfiguration; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; + +/** + * Base class for complex SQL tests based on JDBC driver. + */ +public class JdbcThinComplexDmlDdlCustomSchemaSelfTest extends JdbcThinComplexDmlDdlSelfTest { + /** Simple schema. */ + private static final String SCHEMA_1 = "SCHEMA_1"; + + /** Complex schema. */ + private static final String SCHEMA_2 = "\"SCHEMA 2\""; + + /** Current schema. */ + private String curSchema = SCHEMA_1; + + /** {@inheritDoc} */ + @Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception { + IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName); + + cfg.setSqlSchemas(SCHEMA_1, SCHEMA_2); + + return cfg; + } + + /** {@inheritDoc} */ + @Override protected Connection createConnection() throws SQLException { + return DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/" + curSchema); + } + + /** + * Test create/select/drop flow on escaped schema. + * + * @throws Exception If failed. + */ + public void testCreateSelectDropEscapedSchema() throws Exception { + try { + curSchema = SCHEMA_2; + + testCreateSelectDrop(); + } + finally { + curSchema = SCHEMA_1; + } + } + + /** + * Test multiple iterations. + * + * @throws Exception If failed. 
+ */ + public void testMultiple() throws Exception { + testCreateSelectDrop(); + testCreateSelectDrop(); + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java index 7941abe7a85ea..085d7e8faf7c5 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexDmlDdlSelfTest.java @@ -33,12 +33,10 @@ import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; -import org.apache.ignite.lang.IgniteCallable; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; -import org.apache.ignite.testframework.GridTestUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import org.jetbrains.annotations.NotNull; @@ -82,9 +80,8 @@ public class JdbcThinComplexDmlDdlSelfTest extends GridCommonAbstractTest { /** * @param name Cache name. * @return Cache configuration. - * @throws Exception In case of error. 
*/ - private CacheConfiguration cacheConfiguration(@NotNull String name) throws Exception { + private CacheConfiguration cacheConfiguration(@NotNull String name) { CacheConfiguration cfg = defaultCacheConfiguration(); cfg.setName(name); @@ -115,8 +112,6 @@ protected Connection createConnection() throws SQLException { /** {@inheritDoc} */ @Override protected void beforeTest() throws Exception { super.beforeTest(); - - conn = createConnection(); } /** {@inheritDoc} */ @@ -138,14 +133,8 @@ protected Connection createConnection() throws SQLException { * @throws Exception If failed. */ @SuppressWarnings("ThrowableResultOfMethodCallIgnored") - public void testCreateSelect() throws Exception { - GridTestUtils.assertThrows(null, new IgniteCallable() { - @Override public Object call() throws Exception { - sql(new ResultChecker(new Object[][] {}), "SELECT * from Person"); - - return null; - } - }, SQLException.class, "Table \"PERSON\" not found"); + public void testCreateSelectDrop() throws Exception { + conn = createConnection(); sql(new UpdateChecker(0), "CREATE TABLE person (id int, name varchar, age int, company varchar, city varchar, " + @@ -233,6 +222,9 @@ public void testCreateSelect() throws Exception { assert cnt[0] == 34 : "Invalid rows count"; sql(new UpdateChecker(0), "DROP INDEX idx"); + + sql(new UpdateChecker(0), "DROP TABLE city"); + sql(new UpdateChecker(0), "DROP TABLE person"); } /** diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java index ad1e3126c88aa..22d7d71ea8f1f 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinComplexQuerySelfTest.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.DriverManager; import java.sql.ResultSet; +import java.sql.SQLException; import 
java.sql.Statement; import org.apache.ignite.IgniteCache; import org.apache.ignite.cache.affinity.AffinityKey; @@ -31,6 +32,7 @@ import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; +import org.apache.ignite.testframework.GridTestUtils; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.PARTITIONED; @@ -266,6 +268,37 @@ public void testCalculatedValue() throws Exception { assert cnt == 3; } + /** + * @throws Exception If failed. + */ + public void testWrongArgumentType() throws Exception { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = '2'")) { + assertFalse(rs.next()); + } + + // Check non-indexed field. + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"org\".Organization where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + + // Check indexed field. + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = '2'")) { + assertFalse(rs.next()); + } + + GridTestUtils.assertThrowsWithCause(() -> { + try (ResultSet rs = stmt.executeQuery("select * from \"pers\".Person where name = 2")) { + assertFalse(rs.next()); + } + + return null; + }, SQLException.class); + } + /** * Person. */ @@ -276,7 +309,7 @@ private static class Person implements Serializable { private final int id; /** Name. */ - @QuerySqlField(index = false) + @QuerySqlField(index = true) private final String name; /** Age. 
*/ diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMultipleAddressesTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMultipleAddressesTest.java index 2c2aba970a571..4f6651c2c0b7b 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMultipleAddressesTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionMultipleAddressesTest.java @@ -17,6 +17,7 @@ package org.apache.ignite.jdbc.thin; +import java.lang.management.ManagementFactory; import java.sql.Connection; import java.sql.DatabaseMetaData; import java.sql.DriverManager; @@ -24,12 +25,21 @@ import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; +import java.util.List; import java.util.concurrent.Callable; +import javax.management.MBeanServer; +import javax.management.MBeanServerInvocationHandler; +import javax.management.MalformedObjectNameException; +import javax.management.ObjectName; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.ClientConnectorConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.jdbc.thin.JdbcThinTcpIo; +import org.apache.ignite.internal.processors.odbc.ClientListenerProcessor; +import org.apache.ignite.internal.util.lang.GridAbsPredicate; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.mxbean.ClientProcessorMXBean; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.ipfinder.TcpDiscoveryIpFinder; import org.apache.ignite.spi.discovery.tcp.ipfinder.vm.TcpDiscoveryVmIpFinder; @@ -82,7 +92,8 @@ private static String url() { cfg.setMarshaller(new BinaryMarshaller()); cfg.setClientConnectorConfiguration( - new 
ClientConnectorConfiguration().setPort(jdbcPorts.get(getTestIgniteInstanceIndex(name)))); + new ClientConnectorConfiguration() + .setPort(jdbcPorts.get(getTestIgniteInstanceIndex(name)))); return cfg; } @@ -221,8 +232,121 @@ public void testMultipleAddressesOneNodeFailoverOnStreaming() throws Exception { checkReconnectOnStreaming(url(), false); } + /** + * @throws Exception If failed. + */ + public void testClientConnectionMXBean() throws Exception { + Connection conn = DriverManager.getConnection(URL_PORT_RANGE); + + try { + final Statement stmt0 = conn.createStatement(); + + stmt0.execute("SELECT 1"); + + ResultSet rs0 = stmt0.getResultSet(); + + ClientProcessorMXBean serverMxBean = null; + + // Find node which client is connected to. + for (int i = 0; i < NODES_CNT; i++) { + serverMxBean = clientProcessorBean(i); + + if (!serverMxBean.getConnections().isEmpty()) + break; + } + + assertNotNull("No ClientConnections MXBean found.", serverMxBean); + + serverMxBean.dropAllConnections(); + + GridTestUtils.assertThrows(log, new Callable() { + @Override public Object call() throws Exception { + stmt0.execute("SELECT 1"); + + return null; + } + }, SQLException.class, "Failed to communicate with Ignite cluster"); + + assertTrue(rs0.isClosed()); + assertTrue(stmt0.isClosed()); + + assertTrue(getActiveClients().isEmpty()); + + final Statement stmt1 = conn.createStatement(); + + stmt1.execute("SELECT 1"); + + ResultSet rs1 = stmt1.getResultSet(); + + // Check active clients. + List activeClients = getActiveClients(); + + assertEquals(1, activeClients.size()); + + assertTrue(rs1.next()); + assertEquals(1, rs1.getInt(1)); + + rs1.close(); + stmt1.close(); + } + finally { + conn.close(); + } + + boolean allClosed = GridTestUtils.waitForCondition(new GridAbsPredicate() { + @Override public boolean apply() { + return getActiveClients().isEmpty(); + } + }, 10_000); + + assertTrue(allClosed); + } + + /** + * Return active client list. + * + * @return clients. 
+ */ + @NotNull private List getActiveClients() { + List activeClients = new ArrayList<>(1); + + for (int i = 0; i < NODES_CNT; i++) { + ClientProcessorMXBean mxBean = clientProcessorBean(i); + + assertNotNull(mxBean); + + activeClients.addAll(mxBean.getConnections()); + } + return activeClients; + } + + /** + * Return ClientProcessorMXBean. + * + * @return MBean. + */ + private ClientProcessorMXBean clientProcessorBean(int igniteInt) { + ObjectName mbeanName = null; + + try { + mbeanName = U.makeMBeanName(getTestIgniteInstanceName(igniteInt), "Clients", + ClientListenerProcessor.class.getSimpleName()); + } + catch (MalformedObjectNameException e) { + fail("Failed to register MBean."); + } + + MBeanServer mbeanSrv = ManagementFactory.getPlatformMBeanServer(); + + if (!mbeanSrv.isRegistered(mbeanName)) + fail("MBean is not registered: " + mbeanName.getCanonicalName()); + + return MBeanServerInvocationHandler.newProxyInstance(mbeanSrv, mbeanName, ClientProcessorMXBean.class, true); + } + /** * Check failover on restart cluster ar stop one node. + * * @param url Connection URL. * @param allNodes Restart all nodes flag. * @throws Exception If failed. @@ -254,6 +378,7 @@ private void checkReconnectOnMeta(String url, boolean allNodes) throws Exception /** * Check failover on restart cluster ar stop one node. + * * @param url Connection URL. * @param allNodes Restart all nodes flag. * @throws Exception If failed. @@ -298,6 +423,7 @@ private void checkReconnectOnStatementExecute(String url, boolean allNodes) thro /** * Check failover on restart cluster ar stop one node. + * * @param url Connection URL. * @param allNodes Restart all nodes flag. * @throws Exception If failed. @@ -340,9 +466,9 @@ private void checkReconnectOnResultSet(String url, boolean allNodes) throws Exce } } - /** * Check failover on restart cluster ar stop one node. + * * @param url Connection URL. * @param allNodes Restart all nodes flag. * @throws Exception If failed. 
@@ -372,9 +498,7 @@ private void checkReconnectOnStreaming(String url, boolean allNodes) throws Exce return null; } - }, SQLException.class, "Failed to communicate with Ignite cluster"); - - assertTrue(id[0] > 0); + }, SQLException.class, "Failed to communicate with Ignite cluster on JDBC streaming"); int minId = id[0]; @@ -382,6 +506,9 @@ private void checkReconnectOnStreaming(String url, boolean allNodes) throws Exce final Statement stmt1 = conn.createStatement(); + stmt1.execute("SET STREAMING 1 BATCH_SIZE 10 ALLOW_OVERWRITE 0 " + + " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY 1000"); + for (int i = 0; i < 10; ++i, id[0]++) stmt1.execute("INSERT INTO TEST(id, val) values (" + id[0] + ", " + id[0] + ")"); @@ -407,7 +534,7 @@ private void stop(Connection conn, boolean all) { else { JdbcThinTcpIo io = GridTestUtils.getFieldValue(conn, "cliIo"); - int idx = GridTestUtils.getFieldValue(io, "srvIdx"); + int idx = io.serverIndex(); stopGrid(idx); } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSSLTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSSLTest.java index cc71f51772400..355a198c56672 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSSLTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSSLTest.java @@ -164,7 +164,14 @@ public void testConnectionUseIgniteFactory() throws Exception { * @throws Exception If failed. */ public void testDefaultContext() throws Exception { + // Store exists default SSL context to restore after test. 
+ final SSLContext dfltSslCtx = SSLContext.getDefault(); + + // Setup default context + SSLContext.setDefault(getTestSslContextFactory().create()); + setSslCtxFactoryToCli = true; + // Factory return default SSL context sslCtxFactory = new Factory() { @Override public SSLContext create() { @@ -177,23 +184,16 @@ public void testDefaultContext() throws Exception { } }; - System.setProperty("javax.net.ssl.keyStore", CLI_KEY_STORE_PATH); - System.setProperty("javax.net.ssl.keyStorePassword", "123456"); - System.setProperty("javax.net.ssl.trustStore", TRUST_KEY_STORE_PATH); - System.setProperty("javax.net.ssl.trustStorePassword", "123456"); - startGrids(1); try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/?sslMode=require")) { checkConnection(conn); } finally { - System.getProperties().remove("javax.net.ssl.keyStore"); - System.getProperties().remove("javax.net.ssl.keyStorePassword"); - System.getProperties().remove("javax.net.ssl.trustStore"); - System.getProperties().remove("javax.net.ssl.trustStorePassword"); - stopAllGrids(); + + // Restore SSL context. 
+ SSLContext.setDefault(dfltSslCtx); } } diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java index 14b91b260c9fe..bd816e6ef5d2c 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinConnectionSelfTest.java @@ -33,8 +33,10 @@ import java.util.concurrent.Callable; import java.util.concurrent.Executor; import java.util.concurrent.Executors; +import java.util.concurrent.atomic.AtomicInteger; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.jdbc.thin.JdbcThinConnection; import org.apache.ignite.internal.jdbc.thin.JdbcThinTcpIo; @@ -56,6 +58,7 @@ import static java.sql.ResultSet.TYPE_FORWARD_ONLY; import static java.sql.Statement.NO_GENERATED_KEYS; import static java.sql.Statement.RETURN_GENERATED_KEYS; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; /** * Connection test. @@ -137,7 +140,7 @@ public void testDefaults() throws Exception { * Test invalid endpoint. */ public void testInvalidEndpoint() { - assertInvalid("jdbc:ignite:thin://", "Host name is empty"); + assertInvalid("jdbc:ignite:thin://", "Address is empty"); assertInvalid("jdbc:ignite:thin://:10000", "Host name is empty"); assertInvalid("jdbc:ignite:thin:// :10000", "Host name is empty"); @@ -184,6 +187,38 @@ public void testSocketBuffers() throws Exception { } } + /** + * Test invalid socket buffer sizes with semicolon. + * + * @throws Exception If failed. 
+ */ + public void testSocketBuffersSemicolon() throws Exception { + final int dfltDufSize = 64 * 1024; + + assertInvalid("jdbc:ignite:thin://127.0.0.1;socketSendBuffer=-1", + "Property cannot be lower than 0 [name=socketSendBuffer, value=-1]"); + + assertInvalid("jdbc:ignite:thin://127.0.0.1;socketReceiveBuffer=-1", + "Property cannot be lower than 0 [name=socketReceiveBuffer, value=-1]"); + + // Note that SO_* options are hints, so we check that value is equals to either what we set or to default. + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;socketSendBuffer=1024")) { + assertEquals(1024, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(dfltDufSize, io(conn).connectionProperties().getSocketReceiveBuffer()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;socketReceiveBuffer=1024")) { + assertEquals(dfltDufSize, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(1024, io(conn).connectionProperties().getSocketReceiveBuffer()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;" + + "socketSendBuffer=1024;socketReceiveBuffer=2048")) { + assertEquals(1024, io(conn).connectionProperties().getSocketSendBuffer()); + assertEquals(2048, io(conn).connectionProperties().getSocketReceiveBuffer()); + } + } + /** * Test SQL hints. 
* @@ -191,77 +226,95 @@ public void testSocketBuffers() throws Exception { */ public void testSqlHints() throws Exception { try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, false, false, false, false, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true")) { - assertTrue(io(conn).connectionProperties().isDistributedJoins()); - assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, true, false, false, false, false, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?enforceJoinOrder=true")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - assertTrue(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, true, false, false, false, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?collocated=true")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - 
assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertTrue(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, false, true, false, false, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?replicatedOnly=true")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertTrue(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, false, false, true, false, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?lazy=true")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertTrue(io(conn).connectionProperties().isLazy()); - assertFalse(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, false, false, false, true, false); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?skipReducerOnUpdate=true")) { - assertFalse(io(conn).connectionProperties().isDistributedJoins()); - assertFalse(io(conn).connectionProperties().isEnforceJoinOrder()); - assertFalse(io(conn).connectionProperties().isCollocated()); - assertFalse(io(conn).connectionProperties().isReplicatedOnly()); - assertFalse(io(conn).connectionProperties().isLazy()); - 
assertTrue(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, false, false, false, false, false, true); } try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1?distributedJoins=true&" + "enforceJoinOrder=true&collocated=true&replicatedOnly=true&lazy=true&skipReducerOnUpdate=true")) { - assertTrue(io(conn).connectionProperties().isDistributedJoins()); - assertTrue(io(conn).connectionProperties().isEnforceJoinOrder()); - assertTrue(io(conn).connectionProperties().isCollocated()); - assertTrue(io(conn).connectionProperties().isReplicatedOnly()); - assertTrue(io(conn).connectionProperties().isLazy()); - assertTrue(io(conn).connectionProperties().isSkipReducerOnUpdate()); + assertHints(conn, true, true, true, true, true, true); + } + } + + /** + * Test SQL hints with semicolon. + * + * @throws Exception If failed. + */ + public void testSqlHintsSemicolon() throws Exception { + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;distributedJoins=true")) { + assertHints(conn, true, false, false, false, false, false); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;enforceJoinOrder=true")) { + assertHints(conn, false, true, false, false, false, false); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;collocated=true")) { + assertHints(conn, false, false, true, false, false, false); } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;replicatedOnly=true")) { + assertHints(conn, false, false, false, true, false, false); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;lazy=true")) { + assertHints(conn, false, false, false, false, true, false); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;skipReducerOnUpdate=true")) { + assertHints(conn, false, false, false, false, false, true); + } + + try 
(Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;distributedJoins=true;" + + "enforceJoinOrder=true;collocated=true;replicatedOnly=true;lazy=true;skipReducerOnUpdate=true")) { + assertHints(conn, true, true, true, true, true, true); + } + } + + /** + * Assert hints. + * + * @param conn Connection. + * @param distributedJoins Distributed joins. + * @param enforceJoinOrder Enforce join order. + * @param collocated Co-located. + * @param replicatedOnly Replicated only. + * @param lazy Lazy. + * @param skipReducerOnUpdate Skip reducer on update. + * @throws Exception If failed. + */ + private void assertHints(Connection conn, boolean distributedJoins, boolean enforceJoinOrder, boolean collocated, + boolean replicatedOnly, boolean lazy, boolean skipReducerOnUpdate)throws Exception { + assertEquals(distributedJoins, io(conn).connectionProperties().isDistributedJoins()); + assertEquals(enforceJoinOrder, io(conn).connectionProperties().isEnforceJoinOrder()); + assertEquals(collocated, io(conn).connectionProperties().isCollocated()); + assertEquals(replicatedOnly, io(conn).connectionProperties().isReplicatedOnly()); + assertEquals(lazy, io(conn).connectionProperties().isLazy()); + assertEquals(skipReducerOnUpdate, io(conn).connectionProperties().isSkipReducerOnUpdate()); } /** @@ -303,6 +356,41 @@ public void testTcpNoDelay() throws Exception { } } + /** + * Test TCP no delay property handling with semicolon. + * + * @throws Exception If failed. + */ + public void testTcpNoDelaySemicolon() throws Exception { + assertInvalid("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=0", + "Invalid property value. [name=tcpNoDelay, val=0, choices=[true, false]]"); + + assertInvalid("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=1", + "Invalid property value. [name=tcpNoDelay, val=1, choices=[true, false]]"); + + assertInvalid("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=false1", + "Invalid property value. 
[name=tcpNoDelay, val=false1, choices=[true, false]]"); + + assertInvalid("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=true1", + "Invalid property value. [name=tcpNoDelay, val=true1, choices=[true, false]]"); + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=true")) { + assertTrue(io(conn).connectionProperties().isTcpNoDelay()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=True")) { + assertTrue(io(conn).connectionProperties().isTcpNoDelay()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=false")) { + assertFalse(io(conn).connectionProperties().isTcpNoDelay()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;tcpNoDelay=False")) { + assertFalse(io(conn).connectionProperties().isTcpNoDelay()); + } + } + /** * Test autoCloseServerCursor property handling. * @@ -339,6 +427,38 @@ public void testAutoCloseServerCursorProperty() throws Exception { } } + /** + * Test autoCloseServerCursor property handling with semicolon. + * + * @throws Exception If failed. + */ + public void testAutoCloseServerCursorPropertySemicolon() throws Exception { + String url = "jdbc:ignite:thin://127.0.0.1;autoCloseServerCursor"; + + String err = "Invalid property value. 
[name=autoCloseServerCursor"; + + assertInvalid(url + "=0", err); + assertInvalid(url + "=1", err); + assertInvalid(url + "=false1", err); + assertInvalid(url + "=true1", err); + + try (Connection conn = DriverManager.getConnection(url + "=true")) { + assertTrue(io(conn).connectionProperties().isAutoCloseServerCursor()); + } + + try (Connection conn = DriverManager.getConnection(url + "=True")) { + assertTrue(io(conn).connectionProperties().isAutoCloseServerCursor()); + } + + try (Connection conn = DriverManager.getConnection(url + "=false")) { + assertFalse(io(conn).connectionProperties().isAutoCloseServerCursor()); + } + + try (Connection conn = DriverManager.getConnection(url + "=False")) { + assertFalse(io(conn).connectionProperties().isAutoCloseServerCursor()); + } + } + /** * Test schema property in URL. * @@ -361,6 +481,25 @@ public void testSchema() throws Exception { } } + /** + * Test schema property in URL with semicolon. + * + * @throws Exception If failed. + */ + public void testSchemaSemicolon() throws Exception { + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;schema=public")) { + assertEquals("Invalid schema", "PUBLIC", conn.getSchema()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;schema=\"" + DEFAULT_CACHE_NAME + '"')) { + assertEquals("Invalid schema", DEFAULT_CACHE_NAME, conn.getSchema()); + } + + try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1;schema=_not_exist_schema_")) { + assertEquals("Invalid schema", "_NOT_EXIST_SCHEMA_", conn.getSchema()); + } + } + /** * Get client socket for connection. 
* @@ -407,7 +546,7 @@ public void testClose() throws Exception { assert conn.isClosed(); - assert !conn.isValid(2): "Connection must be closed"; + assert !conn.isValid(2) : "Connection must be closed"; GridTestUtils.assertThrows(log, new Callable() { @Override public Object call() throws Exception { @@ -432,7 +571,7 @@ public void testCreateStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(); } }); @@ -485,7 +624,7 @@ public void testCreateStatement2() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } @@ -544,7 +683,7 @@ public void testCreateStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStatement(TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } @@ -578,7 +717,7 @@ public void testPrepareStatement() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText); } }); @@ -636,7 +775,7 @@ public void testPrepareStatement3() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY); } }); @@ -701,7 +840,7 @@ public void testPrepareStatement4() throws Exception { // Exception when called on closed 
connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.prepareStatement(sqlText, TYPE_FORWARD_ONLY, CONCUR_READ_ONLY, HOLD_CURSORS_OVER_COMMIT); } }); @@ -823,7 +962,7 @@ public void testNativeSql() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.nativeSQL(sqlText); } }); @@ -849,7 +988,7 @@ public void testGetSetAutoCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setAutoCommit(true); } }); @@ -884,7 +1023,7 @@ public void testCommit() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.commit(); } }); @@ -919,7 +1058,7 @@ public void testRollback() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(); } }); @@ -939,7 +1078,7 @@ public void testGetMetaData() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getMetaData(); } }); @@ -955,14 +1094,14 @@ public void testGetSetReadOnly() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setReadOnly(true); } }); // Exception when called on closed connection 
checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.isReadOnly(); } }); @@ -986,14 +1125,14 @@ public void testGetSetCatalog() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setCatalog(""); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getCatalog(); } }); @@ -1010,6 +1149,7 @@ public void testGetSetTransactionIsolation() throws Exception { // Invalid parameter value GridTestUtils.assertThrows(log, new Callable() { + @SuppressWarnings("MagicConstant") @Override public Object call() throws Exception { conn.setTransactionIsolation(-1); @@ -1037,14 +1177,14 @@ public void testGetSetTransactionIsolation() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getTransactionIsolation(); } }); // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setTransactionIsolation(TRANSACTION_SERIALIZABLE); } }); @@ -1070,15 +1210,14 @@ public void testClearGetWarnings() throws Exception { // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getWarnings(); } }); - // Exception when called on closed connection checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.clearWarnings(); 
} }); @@ -1217,7 +1356,7 @@ public void testSetSavepoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1225,7 +1364,7 @@ public void testSetSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(); } }); @@ -1271,7 +1410,7 @@ public void testSetSavepointName() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1279,7 +1418,7 @@ public void testSetSavepointName() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setSavepoint(name); } }); @@ -1325,7 +1464,7 @@ public void testRollbackSavePoint() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1333,7 +1472,7 @@ public void testRollbackSavePoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.rollback(savepoint); } }); @@ -1363,7 +1502,7 @@ public void testReleaseSavepoint() throws Exception { final Savepoint savepoint = getFakeSavepoint(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1371,7 +1510,7 @@ public void testReleaseSavepoint() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void 
run() throws Exception { + @Override public void runx() throws Exception { conn.releaseSavepoint(savepoint); } }); @@ -1517,7 +1656,7 @@ public void testGetSetClientInfoPair() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getClientInfo(name); } }); @@ -1555,7 +1694,7 @@ public void testGetSetClientInfoProperties() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getClientInfo(); } }); @@ -1596,7 +1735,7 @@ public void testCreateArrayOf() throws Exception { // Unsupported checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1604,7 +1743,7 @@ public void testCreateArrayOf() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createArrayOf(typeName, elements); } }); @@ -1632,7 +1771,7 @@ public void testCreateStruct() throws Exception { final Object[] attrs = new Object[] {100, "Tom"}; checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1640,7 +1779,7 @@ public void testCreateStruct() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.createStruct(typeName, attrs); } }); @@ -1667,13 +1806,13 @@ public void testGetSetSchema() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
conn.setSchema(schema); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getSchema(); } }); @@ -1751,13 +1890,13 @@ public void testGetSetNetworkTimeout() throws Exception { conn.close(); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.getNetworkTimeout(); } }); checkConnectionClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { conn.setNetworkTimeout(executor, timeout); } }); @@ -1781,15 +1920,51 @@ public void testSslClientAndPlainServer() { } /** + * @throws Exception If failed. */ - public void testAuthenticateDisableOnServerClientTryAuthenticate() { - GridTestUtils.assertThrows(log, new Callable() { - @Override public Object call() throws Exception { - DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/?user=test&password=test"); + public void testMultithreadingException() throws Exception { + int threadCnt = 10; - return null; + final boolean end[] = new boolean[] {false}; + + final SQLException exs [] = new SQLException[threadCnt]; + + final AtomicInteger exCnt = new AtomicInteger(0); + + try (final Connection conn = DriverManager.getConnection(URL)) { + final IgniteInternalFuture f = GridTestUtils.runMultiThreadedAsync(new Runnable() { + @Override public void run() { + try { + conn.createStatement(); + + while (!end[0]) + conn.createStatement().execute("SELECT 1"); + + conn.createStatement().execute("SELECT 1"); + } + catch (SQLException e) { + end[0] = true; + exs[exCnt.getAndIncrement()] = e; + } + catch (Exception e) { + e.printStackTrace(System.err); + + fail("Unexpected exception (see details above): " + e.getMessage()); + } + } + }, threadCnt, "run-query"); + + f.get(); + + boolean exceptionFound = false; + + for (SQLException e : exs) { + if (e != null && 
e.getMessage().contains("Concurrent access to JDBC connection is not allowed")) + exceptionFound = true; } - }, SQLException.class, "Can not perform the operation because the authentication is not enabled for the cluster"); + + assertTrue("Concurrent access to JDBC connection is not allowed", exceptionFound); + } } /** @@ -1806,4 +1981,4 @@ private Savepoint getFakeSavepoint() { } }; } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementLeakTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementLeakTest.java new file mode 100644 index 0000000000000..f9c1de71e0331 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementLeakTest.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.jdbc.thin; + +import org.apache.ignite.IgniteJdbcThinDriver; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Properties; +import java.util.Set; + +/** + * Prepared statement leaks test. 
+ */ +@SuppressWarnings("ThrowableNotThrown") +public class JdbcThinPreparedStatementLeakTest extends JdbcThinAbstractSelfTest { + /** URL. */ + private static final String URL = "jdbc:ignite:thin://127.0.0.1/"; + + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + startGrid(); + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + stopAllGrids(); + + super.afterTest(); + } + + /** + * @throws Exception If failed. + */ + @SuppressWarnings("StatementWithEmptyBody") + @Test + public void test() throws Exception { + try (Connection conn = new IgniteJdbcThinDriver().connect(URL, new Properties())) { + for (int i = 0; i < 50000; ++i) { + try (PreparedStatement st = conn.prepareStatement("select 1")) { + ResultSet rs = st.executeQuery(); + + while (rs.next()) { + // No-op. + } + + rs.close(); + } + } + + Set stmts = U.field(conn, "stmts"); + + assertEquals(0, stmts.size()); + } + } +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java index c5778537096f1..4635702a9dd78 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinPreparedStatementSelfTest.java @@ -797,146 +797,146 @@ public void testClearParameter() throws Exception { public void testNotSupportedTypes() throws Exception { stmt = conn.prepareStatement(""); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setArray(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() 
throws Exception { stmt.setAsciiStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setAsciiStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBinaryStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (Blob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, (InputStream)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setBlob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new 
GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Clob)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNCharacterStream(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (NClob)null); } }); - checkNotSupported(new RunnableX() { - @Override 
public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, (Reader)null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setNClob(1, null, 0L); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setRowId(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setRef(1, null); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setSQLXML(1, null); } }); @@ -1055,4 +1055,4 @@ private TestObject(int id) { this.id = id; } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java index fd46cdaf56b5f..94713afe873fa 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinResultSetSelfTest.java @@ -38,7 +38,6 @@ import java.util.GregorianCalendar; import java.util.concurrent.Callable; import org.apache.ignite.IgniteCache; -import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.query.annotations.QuerySqlField; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.IgniteConfiguration; @@ -50,6 +49,9 @@ import static 
org.apache.ignite.cache.CacheMode.PARTITIONED; import static org.apache.ignite.cache.CacheWriteSynchronizationMode.FULL_SYNC; +import static org.apache.ignite.testframework.GridTestUtils.RunnableX; +import static org.apache.ignite.testframework.GridTestUtils.assertThrows; +import static org.apache.ignite.testframework.GridTestUtils.assertThrowsAnyCause; /** * Result set test. @@ -156,7 +158,7 @@ private TestObject createObjectWithData(int id) throws MalformedURLException { o.floatVal = 1.0f; o.doubleVal = 1.0d; o.bigVal = new BigDecimal(1); - o.strVal = "1.0"; + o.strVal = "1"; o.arrVal = new byte[] {1}; o.dateVal = new Date(1, 1, 1); o.timeVal = new Time(1, 1, 1); @@ -553,27 +555,27 @@ public void testString() throws Exception { while (rs.next()) { if (cnt == 0) { - assert "1.0".equals(rs.getString("strVal")); - - assert rs.getBoolean(7); - assert rs.getByte(7) == 1; - assert rs.getShort(7) == 1; - assert rs.getInt(7) == 1; - assert rs.getLong(7) == 1; - assert rs.getDouble(7) == 1.0; - assert rs.getFloat(7) == 1.0f; - assert rs.getBigDecimal(7).equals(new BigDecimal(1)); - assert rs.getString(7).equals("1.0"); - - assert rs.getObject(7, Boolean.class); - assert rs.getObject(7, Byte.class) == 1; - assert rs.getObject(7, Short.class) == 1; - assert rs.getObject(7, Integer.class) == 1; - assert rs.getObject(7, Long.class) == 1; - assert rs.getObject(7, Float.class) == 1.f; - assert rs.getObject(7, Double.class) == 1; - assert rs.getObject(7, BigDecimal.class).equals(new BigDecimal(1)); - assert rs.getObject(7, String.class).equals("1.0"); + assert "1".equals(rs.getString("strVal")); + + assert rs.getBoolean(10); + assert rs.getByte(10) == 1; + assert rs.getShort(10) == 1; + assert rs.getInt(10) == 1; + assert rs.getLong(10) == 1; + assert rs.getDouble(10) == 1.0; + assert rs.getFloat(10) == 1.0f; + assert rs.getBigDecimal(10).equals(new BigDecimal("1")); + assert rs.getString(10).equals("1"); + + assert rs.getObject(10, Boolean.class); + assert rs.getObject(10, 
Byte.class) == 1; + assert rs.getObject(10, Short.class) == 1; + assert rs.getObject(10, Integer.class) == 1; + assert rs.getObject(10, Long.class) == 1; + assert rs.getObject(10, Float.class) == 1.f; + assert rs.getObject(10, Double.class) == 1; + assert rs.getObject(10, BigDecimal.class).equals(new BigDecimal(1)); + assert rs.getObject(10, String.class).equals("1"); } cnt++; @@ -773,133 +775,133 @@ public void testNotSupportedTypes() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getArray("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getAsciiStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBinaryStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBlob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws 
Exception { + @Override public void runx() throws Exception { rs.getClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNCharacterStream("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getNClob("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRef("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRowId("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getSQLXML(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception 
{ rs.getSQLXML("id"); } }); @@ -914,499 +916,499 @@ public void testUpdateNotSupported() throws Exception { assert rs.next(); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBoolean(1, true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBoolean("id", true); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte(1, (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateByte("id", (byte)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort(1, (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateShort("id", (short)0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateInt("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong(1, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateLong("id", 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat(1, (float)0.0); } }); checkNotSupported(new 
RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateFloat("id", (float)0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble(1, 0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDouble("id", 0.0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString(1, ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateString("id", ""); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime(1, new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTime("id", new Time(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate(1, new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateDate("id", new Date(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp(1, new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateTimestamp("id", new Timestamp(0)); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() 
throws Exception { rs.updateBytes(1, new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBytes("id", new byte[]{}); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateArray(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateArray("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (Blob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", (InputStream)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBlob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - 
@Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Clob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (NClob)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", (Reader)null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNClob("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws 
Exception { rs.updateAsciiStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateAsciiStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { 
rs.updateNCharacterStream(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream(1, null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNCharacterStream("id", null, 0L); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRef("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRowId("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNString("id", null); } }); checkNotSupported(new RunnableX() { - 
@Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateSQLXML("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject(1, null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateObject("id", null, 0); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal(1, null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateBigDecimal("id", null); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull(1); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateNull("id"); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.cancelRowUpdates(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.updateRow(); } }); checkNotSupported(new RunnableX() 
{ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.deleteRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.insertRow(); } }); checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.moveToInsertRow(); } }); @@ -1424,235 +1426,235 @@ public void testExceptionOnClosedResultSet() throws Exception { rs.close(); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBoolean("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getByte("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getShort("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getInt("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong(1); } }); 
checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getLong("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getFloat("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDouble("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getString("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getBytes("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id"); } }); checkResultSetClosed(new RunnableX() 
{ - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getDate("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTime("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp(1, new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getTimestamp("id", new GregorianCalendar()); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.wasNull(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getMetaData(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws 
Exception { rs.next(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.last(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.afterLast(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.beforeFirst(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.first(); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.findColumn("id"); } }); checkResultSetClosed(new RunnableX() { - @Override public void run() throws Exception { + @Override public void runx() throws Exception { rs.getRow(); } }); @@ -1848,4 +1850,4 @@ private TestObjectField(int a, String b) { return S.toString(TestObjectField.class, this); } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java index 82c0512c7ab70..10dad914960cd 100644 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStatementSelfTest.java @@ -132,7 +132,7 @@ public class JdbcThinStatementSelfTest extends JdbcThinAbstractSelfTest { public void testExecuteQuery0() throws Exception { ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -140,22 +140,22 @@ public void testExecuteQuery0() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert 
"Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals("Mike", rs.getString("firstName")); + assertEquals("Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -177,8 +177,8 @@ public void testExecuteQuery1() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeQuery(sqlText); } }); @@ -188,15 +188,13 @@ public void testExecuteQuery1() throws Exception { * @throws Exception If failed. 
*/ public void testExecute() throws Exception { - assert stmt.execute(SQL); + assertTrue(stmt.execute(SQL)); - assert stmt.getUpdateCount() == -1 : "Update count must be -1 for SELECT query"; + assertEquals("Update count must be -1 for SELECT query", -1, stmt.getUpdateCount()); ResultSet rs = stmt.getResultSet(); - assert rs != null; - - assert stmt.getResultSet() == null; + assertNotNull(rs); int cnt = 0; @@ -204,22 +202,24 @@ public void testExecute() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); + + assertFalse("Statement has more results.", stmt.getMoreResults()); } /** @@ -228,11 +228,11 @@ else if (id == 3) { public void testMaxRows() throws Exception { stmt.setMaxRows(1); - assert stmt.getMaxRows() == 1; + assertEquals(1, stmt.getMaxRows()); ResultSet rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); int cnt = 0; @@ -240,28 +240,28 @@ public void testMaxRows() throws Exception { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert 
"Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 1; + assertEquals(1, cnt); stmt.setMaxRows(0); rs = stmt.executeQuery(SQL); - assert rs != null; + assertNotNull(rs); cnt = 0; @@ -269,22 +269,22 @@ else if (id == 3) { int id = rs.getInt("id"); if (id == 2) { - assert "Joe".equals(rs.getString("firstName")); - assert "Black".equals(rs.getString("lastName")); - assert rs.getInt("age") == 35; + assertEquals("Joe", rs.getString("firstName")); + assertEquals("Black", rs.getString("lastName")); + assertEquals(35, rs.getInt("age")); } else if (id == 3) { - assert "Mike".equals(rs.getString("firstName")); - assert "Green".equals(rs.getString("lastName")); - assert rs.getInt("age") == 40; + assertEquals( "Mike", rs.getString("firstName")); + assertEquals( "Green", rs.getString("lastName")); + assertEquals(40, rs.getInt("age")); } else - assert false : "Wrong ID: " + id; + fail("Wrong ID: " + id); cnt++; } - assert cnt == 2; + assertEquals(2, cnt); } /** @@ -295,14 +295,14 @@ public void testCloseResultSet0() throws Exception { ResultSet rs1 = stmt.executeQuery(SQL); ResultSet rs2 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; - assert rs1.isClosed() : "ResultSet must be implicitly closed after re-execute statement"; + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs0.isClosed()); + assertTrue("ResultSet must be implicitly closed after re-execute statement", rs1.isClosed()); - assert !rs2.isClosed() : "Last result set must be available"; + assertFalse("Last result set must be available", rs2.isClosed()); stmt.close(); - assert rs2.isClosed() : "ResultSet must be 
explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs2.isClosed()); } /** @@ -315,7 +315,7 @@ public void testCloseResultSet1() throws Exception { stmt.close(); - assert rs.isClosed() : "ResultSet must be explicitly closed after close statement"; + assertTrue("ResultSet must be explicitly closed after close statement", rs.isClosed()); } /** @@ -326,66 +326,66 @@ public void testCloseResultSetByConnectionClose() throws Exception { conn.close(); - assert stmt.isClosed() : "Statement must be implicitly closed after close connection"; - assert rs.isClosed() : "ResultSet must be implicitly closed after close connection"; + assertTrue("Statement must be implicitly closed after close connection", stmt.isClosed()); + assertTrue("ResultSet must be implicitly closed after close connection", rs.isClosed()); } /** * @throws Exception If failed. */ public void testCloseOnCompletionAfterQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); ResultSet rs2 = stmt.executeQuery(SQL); stmt.closeOnCompletion(); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** * @throws Exception If failed. 
*/ public void testCloseOnCompletionBeforeQuery() throws Exception { - assert !stmt.isCloseOnCompletion() : "Invalid default closeOnCompletion"; + assertFalse("Invalid default closeOnCompletion", stmt.isCloseOnCompletion()); ResultSet rs0 = stmt.executeQuery(SQL); ResultSet rs1 = stmt.executeQuery(SQL); - assert rs0.isClosed() : "Result set must be closed implicitly"; + assertTrue("Result set must be closed implicitly", rs0.isClosed()); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); rs1.close(); - assert !stmt.isClosed() : "Statement must not be closed"; + assertFalse("Statement must not be closed", stmt.isClosed()); stmt.closeOnCompletion(); ResultSet rs2 = stmt.executeQuery(SQL); - assert stmt.isCloseOnCompletion() : "Invalid closeOnCompletion"; + assertTrue("Invalid closeOnCompletion", stmt.isCloseOnCompletion()); rs2.close(); - assert stmt.isClosed() : "Statement must be closed"; + assertTrue("Statement must be closed", stmt.isClosed()); } /** @@ -414,7 +414,7 @@ public void testExecuteQueryTimeout() throws Exception { * @throws Exception If failed. 
*/ public void testExecuteQueryMultipleOnlyResultSets() throws Exception { - assert conn.getMetaData().supportsMultipleResultSets(); + assertTrue(conn.getMetaData().supportsMultipleResultSets()); int stmtCnt = 10; @@ -543,8 +543,8 @@ public void testExecuteUpdate() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate(sqlText); } }); @@ -634,15 +634,15 @@ public void testGetSetMaxFieldSizeUnsupported() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxFieldSize(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -684,15 +684,15 @@ public void testGetSetMaxRows() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMaxRows(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxRows(maxRows); } }); @@ -728,8 +728,8 @@ public void testSetEscapeProcessing() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void 
runx() throws Exception { stmt.setEscapeProcessing(true); } }); @@ -765,15 +765,15 @@ public void testGetSetQueryTimeout() throws Exception { stmt.close(); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); // Call on a closed statement - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(timeout); } }); @@ -783,7 +783,7 @@ public void testGetSetQueryTimeout() throws Exception { * @throws Exception If failed. */ public void testMaxFieldSize() throws Exception { - assert stmt.getMaxFieldSize() >= 0; + assertTrue(stmt.getMaxFieldSize() >= 0); GridTestUtils.assertThrows(log, new Callable() { @@ -797,8 +797,8 @@ public void testMaxFieldSize() throws Exception { "Invalid field limit" ); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setMaxFieldSize(100); } }); @@ -808,22 +808,22 @@ public void testMaxFieldSize() throws Exception { * @throws Exception If failed. 
*/ public void testQueryTimeout() throws Exception { - assert stmt.getQueryTimeout() == 0 : "Default timeout invalid: " + stmt.getQueryTimeout(); + assertEquals("Default timeout invalid: " + stmt.getQueryTimeout(), 0, stmt.getQueryTimeout()); stmt.setQueryTimeout(10); - assert stmt.getQueryTimeout() == 10; + assertEquals(10, stmt.getQueryTimeout()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getQueryTimeout(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setQueryTimeout(10); } }); @@ -835,18 +835,18 @@ public void testQueryTimeout() throws Exception { public void testWarningsOnClosedStatement() throws Exception { stmt.clearWarnings(); - assert stmt.getWarnings() == null; + assertNull(null, stmt.getWarnings()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getWarnings(); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.clearWarnings(); } }); @@ -856,16 +856,16 @@ public void testWarningsOnClosedStatement() throws Exception { * @throws Exception If failed. 
*/ public void testCursorName() throws Exception { - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setCursorName("test"); } }); @@ -875,22 +875,22 @@ public void testCursorName() throws Exception { * @throws Exception If failed. */ public void testGetMoreResults() throws Exception { - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(); + assertFalse(stmt.getMoreResults()); - assert stmt.getResultSet() == null; + assertNull(stmt.getResultSet()); - assert rs.isClosed(); + assertTrue(rs.isClosed()); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(); } }); @@ -899,37 +899,59 @@ public void testGetMoreResults() throws Exception { /** * @throws Exception If failed. 
*/ - public void testGetMoreResults1() throws Exception { - assert !stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + public void testGetMoreResultsKeepCurrent() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.execute("select 1; "); ResultSet rs = stmt.getResultSet(); - assert !stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); - assert !rs.isClosed(); + assertFalse(rs.isClosed()); - assert !stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS); + stmt.close(); - assert rs.isClosed(); + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { + stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); + } + }); + } + + /** + * @throws Exception If failed. + */ + @org.junit.Test + public void testGetMoreResultsCloseAll() throws Exception { + assertFalse(stmt.getMoreResults(Statement.CLOSE_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT)); + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); + + stmt.execute("select 1; "); + + ResultSet rs = stmt.getResultSet(); + + assertFalse(stmt.getMoreResults(Statement.CLOSE_ALL_RESULTS)); stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getMoreResults(Statement.KEEP_CURRENT_RESULT); } }); } /** + * Verifies that emty batch can be performed. + * * @throws Exception If failed. 
*/ public void testBatchEmpty() throws Exception { - assert conn.getMetaData().supportsBatchUpdates(); + assertTrue(conn.getMetaData().supportsBatchUpdates()); stmt.addBatch(""); stmt.clearBatch(); @@ -951,7 +973,7 @@ public void testBatchEmpty() throws Exception { * @throws Exception If failed. */ public void testFetchDirection() throws Exception { - assert stmt.getFetchDirection() == ResultSet.FETCH_FORWARD; + assertEquals(ResultSet.FETCH_FORWARD, stmt.getFetchDirection()); GridTestUtils.assertThrows(log, new Callable() { @@ -967,14 +989,14 @@ public void testFetchDirection() throws Exception { stmt.close(); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.setFetchDirection(-1); } }); - checkStatementClosed(new RunnableX() { - @Override public void run() throws Exception { + checkStatementClosed(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getFetchDirection(); } }); @@ -1006,46 +1028,46 @@ public void testAutogenerated() throws Exception { SQLException.class, "Invalid autoGeneratedKeys value"); - assert !conn.getMetaData().supportsGetGeneratedKeys(); + assertFalse(conn.getMetaData().supportsGetGeneratedKeys()); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.getGeneratedKeys(); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { 
stmt.executeUpdate("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.executeUpdate("select 1", new String[] {"a", "b"}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", Statement.RETURN_GENERATED_KEYS); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new int[] {1, 2}); } }); - checkNotSupported(new RunnableX() { - @Override public void run() throws Exception { + checkNotSupported(new GridTestUtils.RunnableX() { + @Override public void runx() throws Exception { stmt.execute("select 1", new String[] {"a", "b"}); } }); @@ -1115,7 +1137,7 @@ public void testStatementTypeMismatchSelectForCachedQuery() throws Exception { SQLException.class, "Given statement type does not match that declared by JDBC driver"); - assert stmt.getResultSet() == null : "Not results expected. Last statement is executed with exception"; + assertNull("Not results expected. Last statement is executed with exception", stmt.getResultSet()); } /** @@ -1137,18 +1159,20 @@ public void testStatementTypeMismatchUpdate() throws Exception { boolean next = rs.next(); - assert next; + assertTrue(next); - assert rs.getInt(1) == 1 : "The data must not be updated. " + + assertEquals("The data must not be updated. " + "Because update statement is executed via 'executeQuery' method." 
+ - " Data [val=" + rs.getInt(1) + ']'; + " Data [val=" + rs.getInt(1) + ']', + 1, + rs.getInt(1)); } /** */ private void fillCache() { IgniteCache cachePerson = grid(0).cache(DEFAULT_CACHE_NAME); - assert cachePerson != null; + assertNotNull(cachePerson); cachePerson.put("p1", new Person(1, "John", "White", 25)); cachePerson.put("p2", new Person(2, "Joe", "Black", 35)); @@ -1229,4 +1253,4 @@ private Person(int id, String firstName, String lastName, int age) { this.age = age; } } -} \ No newline at end of file +} diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java new file mode 100644 index 0000000000000..70046356b210b --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingAbstractSelfTest.java @@ -0,0 +1,505 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteDataStreamer; +import org.apache.ignite.cache.CachePeekMode; +import org.apache.ignite.cache.query.FieldsQueryCursor; +import org.apache.ignite.cache.query.SqlFieldsQuery; +import org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest; +import org.apache.ignite.internal.processors.query.GridQueryCancel; +import org.apache.ignite.internal.processors.query.GridQueryProcessor; +import org.apache.ignite.internal.processors.query.SqlClientContext; +import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.testframework.GridTestUtils; +import org.jetbrains.annotations.Nullable; + +/** + * Tests for streaming via thin driver. 
+ */ +public abstract class JdbcThinStreamingAbstractSelfTest extends JdbcStreamingSelfTest { + /** */ + protected int batchSize = 17; + + /** {@inheritDoc} */ + @Override protected void beforeTestsStarted() throws Exception { + GridQueryProcessor.idxCls = IndexingWithContext.class; + + super.beforeTestsStarted(); + + batchSize = 17; + } + + /** {@inheritDoc} */ + @Override protected void beforeTest() throws Exception { + super.beforeTest(); + + // Init IndexingWithContext.cliCtx + try (Connection c = createOrdinaryConnection()) { + execute(c, "SELECT 1"); + } + } + + /** {@inheritDoc} */ + @Override protected void afterTest() throws Exception { + try (Connection c = createOrdinaryConnection()) { + execute(c, "DROP TABLE PUBLIC.T IF EXISTS"); + } + + IndexingWithContext.cliCtx = null; + + super.afterTest(); + } + + /** {@inheritDoc} */ + @Override protected Connection createOrdinaryConnection() throws SQLException { + return JdbcThinAbstractSelfTest.connect(grid(0), null); + } + + /** + * @throws Exception if failed. + */ + public void testStreamedBatchedInsert() throws Exception { + for (int i = 10; i <= 100; i += 10) + put(i, nameForId(i * 100)); + + try (Connection conn = createStreamedConnection(false)) { + assertStreamingState(true); + + try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?), " + + "(?, ?)")) { + for (int i = 1; i <= 100; i+= 2) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + stmt.setInt(3, i + 1); + stmt.setString(4, nameForId(i + 1)); + + stmt.addBatch(); + } + + stmt.executeBatch(); + } + } + + U.sleep(500); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) { + if (i % 10 != 0) + assertEquals(nameForId(i), nameForIdInCache(i)); + else // All that divides by 10 evenly should point to numbers 100 times greater - see above + assertEquals(nameForId(i * 100), nameForIdInCache(i)); + } + } + + /** + * @throws SQLException if failed. 
+ */ + public void testSimultaneousStreaming() throws Exception { + try (Connection anotherConn = createOrdinaryConnection()) { + execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH " + + "\"cache_name=T,wrap_value=false\""); + } + + // Timeout to let connection close be handled on server side. + U.sleep(500); + + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingState(true); + + PreparedStatement firstStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)"); + + PreparedStatement secondStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)"); + + try { + for (int i = 1; i <= 10; i++) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + firstStmt.executeUpdate(); + } + + for (int i = 51; i <= 67; i++) { + secondStmt.setInt(1, i); + secondStmt.setInt(2, i); + + secondStmt.executeUpdate(); + } + + for (int i = 11; i <= 50; i++) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + firstStmt.executeUpdate(); + } + + for (int i = 68; i <= 100; i++) { + secondStmt.setInt(1, i); + secondStmt.setInt(2, i); + + secondStmt.executeUpdate(); + } + + assertCacheEmpty(); + + SqlClientContext cliCtx = sqlClientContext(); + + final HashMap> streamers = U.field(cliCtx, "streamers"); + + // Wait when node process requests (because client send batch requests async). + GridTestUtils.waitForCondition(() -> streamers.size() == 2, 1000); + + assertEquals(2, streamers.size()); + + assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet()); + } + finally { + U.closeQuiet(firstStmt); + + U.closeQuiet(secondStmt); + } + } + + // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush + // on connection close in any way. + U.sleep(1000); + + // Now let's check it's all there. 
+ for (int i = 1; i <= 50; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + + for (int i = 51; i <= 100; i++) + assertEquals(i, grid(0).cache("T").get(i)); + } + + /** + * + */ + public void testStreamingWithMixedStatementTypes() throws Exception { + String prepStmtStr = "insert into Person(\"id\", \"name\") values (?, ?)"; + + String stmtStr = "insert into Person(\"id\", \"name\") values (%d, '%s')"; + + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingState(true); + + PreparedStatement firstStmt = conn.prepareStatement(prepStmtStr); + + Statement secondStmt = conn.createStatement(); + + try { + for (int i = 1; i <= 100; i++) { + boolean usePrep = Math.random() > 0.5; + + boolean useBatch = Math.random() > 0.5; + + if (usePrep) { + firstStmt.setInt(1, i); + firstStmt.setString(2, nameForId(i)); + + if (useBatch) + firstStmt.addBatch(); + else + firstStmt.execute(); + } + else { + String sql = String.format(stmtStr, i, nameForId(i)); + + if (useBatch) + secondStmt.addBatch(sql); + else + secondStmt.execute(sql); + } + } + } + finally { + U.closeQuiet(firstStmt); + + U.closeQuiet(secondStmt); + } + } + + // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush + // on connection close in any way. + U.sleep(1000); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + } + + /** + * @throws SQLException if failed. + */ + public void testStreamingOffToOn() throws Exception { + try (Connection conn = createOrdinaryConnection()) { + assertStreamingState(false); + + execute(conn, "SET STREAMING 1"); + + assertStreamingState(true); + } + } + + /** + * @throws SQLException if failed. 
+ */ + public void testStreamingOffToOff() throws Exception { + try (Connection conn = createOrdinaryConnection()) { + assertStreamingState(false); + + execute(conn, "SET STREAMING 0"); + + assertStreamingState(false); + } + } + + /** + * @throws SQLException if failed. + */ + public void testStreamingOnToOff() throws Exception { + try (Connection conn = createStreamedConnection(false)) { + assertStreamingState(true); + + execute(conn, "SET STREAMING off"); + + assertStreamingState(false); + } + } + + /** + * @throws SQLException if failed. + */ + public void testFlush() throws Exception { + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingState(true); + + try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } + + assertCacheEmpty(); + + execute(conn, "set streaming 0"); + + assertStreamingState(false); + + U.sleep(500); + + // Now let's check it's all there. + for (int i = 1; i <= 100; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + } + } + + /** + * @throws SQLException if failed. 
+ */ + public void testStreamingReEnabled() throws Exception { + try (Connection conn = createStreamedConnection(false, 10000)) { + assertStreamingState(true); + + try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) { + for (int i = 1; i <= 100; i++) { + stmt.setInt(1, i); + stmt.setString(2, nameForId(i)); + + stmt.executeUpdate(); + } + } + + assertCacheEmpty(); + + execute(conn, "set streaming 1 batch_size 111 allow_overwrite 0 per_node_buffer_size 512 " + + "per_node_parallel_operations 4 flush_frequency 5000"); + + U.sleep(500); + + assertEquals((Integer)111, U.field((Object)U.field(conn, "streamState"), "streamBatchSize")); + + SqlClientContext cliCtx = sqlClientContext(); + + assertTrue(cliCtx.isStream()); + + assertFalse(U.field(cliCtx, "streamAllowOverwrite")); + + assertEquals((Integer)512, U.field(cliCtx, "streamNodeBufSize")); + + assertEquals((Long)5000L, U.field(cliCtx, "streamFlushTimeout")); + + assertEquals((Integer)4, U.field(cliCtx, "streamNodeParOps")); + + // Now let's check it's all there - SET STREAMING 1 repeated call must also have caused flush. 
+ for (int i = 1; i <= 100; i++) + assertEquals(nameForId(i), nameForIdInCache(i)); + } + } + + /** + * + */ + @SuppressWarnings("ThrowableNotThrown") + public void testNonStreamedBatch() { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Object call() throws Exception { + try (Connection conn = createOrdinaryConnection()) { + try (Statement s = conn.createStatement()) { + for (int i = 1; i <= 10; i++) + s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", i, + nameForId(i))); + + execute(conn, "SET STREAMING 1"); + + s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", 11, + nameForId(11))); + } + } + + return null; + } + }, SQLException.class, "Statement has non-empty batch (call executeBatch() or clearBatch() before " + + "enabling streaming)."); + } + + /** + * + */ + @SuppressWarnings("ThrowableNotThrown") + public void testStreamingStatementInTheMiddleOfNonPreparedBatch() { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Object call() throws Exception { + try (Connection conn = createOrdinaryConnection()) { + try (Statement s = conn.createStatement()) { + s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", 1, + nameForId(1))); + + s.addBatch("SET STREAMING 1 FLUSH_FREQUENCY 10000"); + } + } + + return null; + } + }, SQLException.class, "Streaming control commands must be executed explicitly"); + } + + /** + * + */ + @SuppressWarnings("ThrowableNotThrown") + public void testBatchingSetStreamingStatement() { + GridTestUtils.assertThrows(null, new Callable() { + @Override public Object call() throws Exception { + try (Connection conn = createOrdinaryConnection()) { + try (PreparedStatement s = conn.prepareStatement("SET STREAMING 1 FLUSH_FREQUENCY 10000")) { + s.addBatch(); + } + } + + return null; + } + }, SQLException.class, "Streaming control commands must be executed explicitly"); + } + + /** + * Check that there's nothing in 
cache. + */ + protected void assertCacheEmpty() { + assertEquals(0, cache().size(CachePeekMode.ALL)); + } + + /** + * @param conn Connection. + * @param sql Statement. + * @throws SQLException if failed. + */ + protected static void execute(Connection conn, String sql) throws SQLException { + try (Statement s = conn.createStatement()) { + s.execute(sql); + } + } + + /** + * @return Active SQL client context. + */ + private SqlClientContext sqlClientContext() { + assertNotNull(IndexingWithContext.cliCtx); + + return IndexingWithContext.cliCtx; + } + + /** + * Check that streaming state on target node is as expected. + * + * @param on Expected streaming state. + */ + protected void assertStreamingState(boolean on) throws Exception { + SqlClientContext cliCtx = sqlClientContext(); + + GridTestUtils.waitForCondition(() -> cliCtx.isStream() == on, 1000); + + assertEquals(on, cliCtx.isStream()); + } + + /** {@inheritDoc} */ + @Override protected void assertStatementForbidden(String sql) { + batchSize = 1; + + super.assertStatementForbidden(sql); + } + + /** + * + */ + static final class IndexingWithContext extends IgniteH2Indexing { + /** Client context. 
*/ + static SqlClientContext cliCtx; + + /** {@inheritDoc} */ + @Override public List streamBatchedUpdateQuery(String schemaName, String qry, List params, + SqlClientContext cliCtx) throws IgniteCheckedException { + IndexingWithContext.cliCtx = cliCtx; + + return super.streamBatchedUpdateQuery(schemaName, qry, params, cliCtx); + } + + /** {@inheritDoc} */ + @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, + @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, + GridQueryCancel cancel) { + IndexingWithContext.cliCtx = cliCtx; + + return super.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel); + } + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java new file mode 100644 index 0000000000000..b91258f8a4eb9 --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingNotOrderedSelfTest.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; + +/** + * Tests for not ordered streaming via thin driver. + */ +public class JdbcThinStreamingNotOrderedSelfTest extends JdbcThinStreamingAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception { + Connection c = JdbcThinAbstractSelfTest.connect(grid(0), null); + + execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize + + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0) + + " PER_NODE_BUFFER_SIZE 1000 " + + " FLUSH_FREQUENCY " + flushFreq + ";" + ); + + return c; + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java new file mode 100644 index 0000000000000..b615f8cd9614e --- /dev/null +++ b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingOrderedSelfTest.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.jdbc.thin; + +import java.sql.Connection; + +/** + * Tests for ordered streaming via thin driver. + */ +public class JdbcThinStreamingOrderedSelfTest extends JdbcThinStreamingAbstractSelfTest { + /** {@inheritDoc} */ + @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception { + Connection c = JdbcThinAbstractSelfTest.connect(grid(0), null); + + execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize + + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0) + + " PER_NODE_BUFFER_SIZE 1000 " + + " FLUSH_FREQUENCY " + flushFreq + + " ORDERED;" + ); + + return c; + } +} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java b/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java deleted file mode 100644 index 3c36f54327df9..0000000000000 --- a/modules/clients/src/test/java/org/apache/ignite/jdbc/thin/JdbcThinStreamingSelfTest.java +++ /dev/null @@ -1,486 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.ignite.jdbc.thin; - -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.Arrays; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.concurrent.Callable; -import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.IgniteDataStreamer; -import org.apache.ignite.cache.CachePeekMode; -import org.apache.ignite.cache.query.FieldsQueryCursor; -import org.apache.ignite.cache.query.SqlFieldsQuery; -import org.apache.ignite.internal.jdbc2.JdbcStreamingSelfTest; -import org.apache.ignite.internal.processors.query.GridQueryCancel; -import org.apache.ignite.internal.processors.query.GridQueryProcessor; -import org.apache.ignite.internal.processors.query.SqlClientContext; -import org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing; -import org.apache.ignite.internal.util.typedef.internal.U; -import org.apache.ignite.testframework.GridTestUtils; -import org.jetbrains.annotations.Nullable; - -/** - * Tests for streaming via thin driver. 
- */ -public class JdbcThinStreamingSelfTest extends JdbcStreamingSelfTest { - /** */ - private int batchSize = 17; - - /** {@inheritDoc} */ - @Override protected void beforeTestsStarted() throws Exception { - GridQueryProcessor.idxCls = IndexingWithContext.class; - - super.beforeTestsStarted(); - - batchSize = 17; - } - - /** {@inheritDoc} */ - @Override protected void afterTest() throws Exception { - try (Connection c = createOrdinaryConnection()) { - execute(c, "DROP TABLE PUBLIC.T IF EXISTS"); - } - - IndexingWithContext.cliCtx = null; - - super.afterTest(); - } - - /** {@inheritDoc} */ - @Override protected Connection createStreamedConnection(boolean allowOverwrite, long flushFreq) throws Exception { - Connection c = JdbcThinAbstractSelfTest.connect(grid(0), null ); - - execute(c, "SET STREAMING 1 BATCH_SIZE " + batchSize + " ALLOW_OVERWRITE " + (allowOverwrite ? 1 : 0) + - " PER_NODE_BUFFER_SIZE 1000 FLUSH_FREQUENCY " + flushFreq); - - return c; - } - - /** {@inheritDoc} */ - @Override protected Connection createOrdinaryConnection() throws SQLException { - return JdbcThinAbstractSelfTest.connect(grid(0), null); - } - - /** - * @throws Exception if failed. - */ - public void testStreamedBatchedInsert() throws Exception { - for (int i = 10; i <= 100; i += 10) - put(i, nameForId(i * 100)); - - try (Connection conn = createStreamedConnection(false)) { - assertStreamingState(true); - - try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?), " + - "(?, ?)")) { - for (int i = 1; i <= 100; i+= 2) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - stmt.setInt(3, i + 1); - stmt.setString(4, nameForId(i + 1)); - - stmt.addBatch(); - } - - stmt.executeBatch(); - } - } - - U.sleep(500); - - // Now let's check it's all there. 
- for (int i = 1; i <= 100; i++) { - if (i % 10 != 0) - assertEquals(nameForId(i), nameForIdInCache(i)); - else // All that divides by 10 evenly should point to numbers 100 times greater - see above - assertEquals(nameForId(i * 100), nameForIdInCache(i)); - } - } - - /** - * @throws SQLException if failed. - */ - public void testSimultaneousStreaming() throws Exception { - try (Connection anotherConn = createOrdinaryConnection()) { - execute(anotherConn, "CREATE TABLE PUBLIC.T(x int primary key, y int) WITH " + - "\"cache_name=T,wrap_value=false\""); - } - - // Timeout to let connection close be handled on server side. - U.sleep(500); - - try (Connection conn = createStreamedConnection(false, 10000)) { - assertStreamingState(true); - - PreparedStatement firstStmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)"); - - PreparedStatement secondStmt = conn.prepareStatement("insert into PUBLIC.T(x, y) values (?, ?)"); - - try { - for (int i = 1; i <= 10; i++) { - firstStmt.setInt(1, i); - firstStmt.setString(2, nameForId(i)); - - firstStmt.executeUpdate(); - } - - for (int i = 51; i <= 67; i++) { - secondStmt.setInt(1, i); - secondStmt.setInt(2, i); - - secondStmt.executeUpdate(); - } - - for (int i = 11; i <= 50; i++) { - firstStmt.setInt(1, i); - firstStmt.setString(2, nameForId(i)); - - firstStmt.executeUpdate(); - } - - for (int i = 68; i <= 100; i++) { - secondStmt.setInt(1, i); - secondStmt.setInt(2, i); - - secondStmt.executeUpdate(); - } - - assertCacheEmpty(); - - SqlClientContext cliCtx = sqlClientContext(); - - HashMap> streamers = U.field(cliCtx, "streamers"); - - assertEquals(2, streamers.size()); - - assertEqualsCollections(new HashSet<>(Arrays.asList("person", "T")), streamers.keySet()); - } - finally { - U.closeQuiet(firstStmt); - - U.closeQuiet(secondStmt); - } - } - - // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush - // on connection close in any way. 
- U.sleep(1000); - - // Now let's check it's all there. - for (int i = 1; i <= 50; i++) - assertEquals(nameForId(i), nameForIdInCache(i)); - - for (int i = 51; i <= 100; i++) - assertEquals(i, grid(0).cache("T").get(i)); - } - - /** - * - */ - public void testStreamingWithMixedStatementTypes() throws Exception { - String prepStmtStr = "insert into Person(\"id\", \"name\") values (?, ?)"; - - String stmtStr = "insert into Person(\"id\", \"name\") values (%d, '%s')"; - - try (Connection conn = createStreamedConnection(false, 10000)) { - assertStreamingState(true); - - PreparedStatement firstStmt = conn.prepareStatement(prepStmtStr); - - Statement secondStmt = conn.createStatement(); - - try { - for (int i = 1; i <= 100; i++) { - boolean usePrep = Math.random() > 0.5; - - boolean useBatch = Math.random() > 0.5; - - if (usePrep) { - firstStmt.setInt(1, i); - firstStmt.setString(2, nameForId(i)); - - if (useBatch) - firstStmt.addBatch(); - else - firstStmt.execute(); - } - else { - String sql = String.format(stmtStr, i, nameForId(i)); - - if (useBatch) - secondStmt.addBatch(sql); - else - secondStmt.execute(sql); - } - } - } - finally { - U.closeQuiet(firstStmt); - - U.closeQuiet(secondStmt); - } - } - - // Let's wait a little so that all data arrives to destination - we can't intercept streamers' flush - // on connection close in any way. - U.sleep(1000); - - // Now let's check it's all there. - for (int i = 1; i <= 100; i++) - assertEquals(nameForId(i), nameForIdInCache(i)); - } - - /** - * @throws SQLException if failed. - */ - public void testStreamingOffToOn() throws SQLException { - try (Connection conn = createOrdinaryConnection()) { - assertStreamingState(false); - - execute(conn, "SET STREAMING 1"); - - assertStreamingState(true); - } - } - - /** - * @throws SQLException if failed. 
- */ - public void testStreamingOnToOff() throws Exception { - try (Connection conn = createStreamedConnection(false)) { - assertStreamingState(true); - - execute(conn, "SET STREAMING off"); - - assertStreamingState(false); - } - } - - /** - * @throws SQLException if failed. - */ - public void testFlush() throws Exception { - try (Connection conn = createStreamedConnection(false, 10000)) { - assertStreamingState(true); - - try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) { - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - - stmt.executeUpdate(); - } - } - - assertCacheEmpty(); - - execute(conn, "set streaming 0"); - - assertStreamingState(false); - - U.sleep(500); - - // Now let's check it's all there. - for (int i = 1; i <= 100; i++) - assertEquals(nameForId(i), nameForIdInCache(i)); - } - } - - /** - * @throws SQLException if failed. - */ - public void testStreamingReEnabled() throws Exception { - try (Connection conn = createStreamedConnection(false, 10000)) { - assertStreamingState(true); - - try (PreparedStatement stmt = conn.prepareStatement("insert into Person(\"id\", \"name\") values (?, ?)")) { - for (int i = 1; i <= 100; i++) { - stmt.setInt(1, i); - stmt.setString(2, nameForId(i)); - - stmt.executeUpdate(); - } - } - - assertCacheEmpty(); - - execute(conn, "set streaming 1 batch_size 111 allow_overwrite 0 per_node_buffer_size 512 " + - "per_node_parallel_operations 4 flush_frequency 5000"); - - U.sleep(500); - - assertEquals((Integer)111, U.field(conn, "streamBatchSize")); - - SqlClientContext cliCtx = sqlClientContext(); - - assertTrue(cliCtx.isStream()); - - assertFalse(U.field(cliCtx, "streamAllowOverwrite")); - - assertEquals((Integer)512, U.field(cliCtx, "streamNodeBufSize")); - - assertEquals((Long)5000L, U.field(cliCtx, "streamFlushTimeout")); - - assertEquals((Integer)4, U.field(cliCtx, "streamNodeParOps")); - - // Now let's check it's all there - 
SET STREAMING 1 repeated call must also have caused flush. - for (int i = 1; i <= 100; i++) - assertEquals(nameForId(i), nameForIdInCache(i)); - } - } - - /** - * - */ - @SuppressWarnings("ThrowableNotThrown") - public void testNonStreamedBatch() { - GridTestUtils.assertThrows(null, new Callable() { - @Override public Object call() throws Exception { - try (Connection conn = createOrdinaryConnection()) { - try (Statement s = conn.createStatement()) { - for (int i = 1; i <= 10; i++) - s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", i, - nameForId(i))); - - execute(conn, "SET STREAMING 1"); - - s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", 11, - nameForId(11))); - } - } - - return null; - } - }, SQLException.class, "Statement has non-empty batch (call executeBatch() or clearBatch() before " + - "enabling streaming)."); - } - - /** - * - */ - @SuppressWarnings("ThrowableNotThrown") - public void testStreamingStatementInTheMiddleOfNonPreparedBatch() { - GridTestUtils.assertThrows(null, new Callable() { - @Override public Object call() throws Exception { - try (Connection conn = createOrdinaryConnection()) { - try (Statement s = conn.createStatement()) { - s.addBatch(String.format("insert into Person(\"id\", \"name\")values (%d, '%s')", 1, - nameForId(1))); - - s.addBatch("SET STREAMING 1 FLUSH_FREQUENCY 10000"); - } - } - - return null; - } - }, SQLException.class, "Streaming control commands must be executed explicitly"); - } - - /** - * - */ - @SuppressWarnings("ThrowableNotThrown") - public void testBatchingSetStreamingStatement() { - GridTestUtils.assertThrows(null, new Callable() { - @Override public Object call() throws Exception { - try (Connection conn = createOrdinaryConnection()) { - try (PreparedStatement s = conn.prepareStatement("SET STREAMING 1 FLUSH_FREQUENCY 10000")) { - s.addBatch(); - } - } - - return null; - } - }, SQLException.class, "Streaming control commands must be executed 
explicitly"); - } - - /** - * Check that there's nothing in cache. - */ - private void assertCacheEmpty() { - assertEquals(0, cache().size(CachePeekMode.ALL)); - } - - /** - * @param conn Connection. - * @param sql Statement. - * @throws SQLException if failed. - */ - private static void execute(Connection conn, String sql) throws SQLException { - try (Statement s = conn.createStatement()) { - s.execute(sql); - } - } - - /** - * @return Active SQL client context. - */ - private SqlClientContext sqlClientContext() { - assertNotNull(IndexingWithContext.cliCtx); - - return IndexingWithContext.cliCtx; - } - - /** - * Check that streaming state on target node is as expected. - * @param on Expected streaming state. - */ - private void assertStreamingState(boolean on) { - SqlClientContext cliCtx = sqlClientContext(); - - assertEquals(on, cliCtx.isStream()); - } - - /** {@inheritDoc} */ - @Override protected void assertStatementForbidden(String sql) { - batchSize = 1; - - super.assertStatementForbidden(sql); - } - - /** - * - */ - private static final class IndexingWithContext extends IgniteH2Indexing { - /** Client context. 
*/ - static SqlClientContext cliCtx; - - /** {@inheritDoc} */ - @Override public List streamBatchedUpdateQuery(String schemaName, String qry, List params, - SqlClientContext cliCtx) throws IgniteCheckedException { - IndexingWithContext.cliCtx = cliCtx; - - return super.streamBatchedUpdateQuery(schemaName, qry, params, cliCtx); - } - - /** {@inheritDoc} */ - @Override public List>> querySqlFields(String schemaName, SqlFieldsQuery qry, - @Nullable SqlClientContext cliCtx, boolean keepBinary, boolean failOnMultipleStmts, - GridQueryCancel cancel) { - IndexingWithContext.cliCtx = cliCtx; - - return super.querySqlFields(schemaName, qry, cliCtx, keepBinary, failOnMultipleStmts, cancel); - } - } -} \ No newline at end of file diff --git a/modules/clients/src/test/java/org/apache/ignite/loadtests/client/ClientMarshallerBenchmarkTest.java b/modules/clients/src/test/java/org/apache/ignite/loadtests/client/ClientMarshallerBenchmarkTest.java index 570678fc81d22..08c2cbe375213 100644 --- a/modules/clients/src/test/java/org/apache/ignite/loadtests/client/ClientMarshallerBenchmarkTest.java +++ b/modules/clients/src/test/java/org/apache/ignite/loadtests/client/ClientMarshallerBenchmarkTest.java @@ -22,11 +22,14 @@ import java.util.HashMap; import java.util.Map; import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.internal.client.marshaller.GridClientMarshaller; import org.apache.ignite.internal.client.marshaller.jdk.GridClientJdkMarshaller; import org.apache.ignite.internal.client.marshaller.optimized.GridClientOptimizedMarshaller; import org.apache.ignite.internal.processors.rest.client.message.GridClientCacheRequest; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.marshaller.MarshallerUtils; import org.apache.ignite.testframework.junits.common.GridCommonAbstractTest; import static 
org.apache.ignite.internal.processors.rest.client.message.GridClientCacheRequest.GridCacheOperation.CAS; @@ -41,10 +44,15 @@ public class ClientMarshallerBenchmarkTest extends GridCommonAbstractTest { /** */ public ClientMarshallerBenchmarkTest() { - marshallers = new GridClientMarshaller[] { - new GridClientJdkMarshaller(), - new GridClientOptimizedMarshaller() - }; + try { + marshallers = new GridClientMarshaller[] { + new GridClientJdkMarshaller(MarshallerUtils.classNameFilter(this.getClass().getClassLoader())), + new GridClientOptimizedMarshaller() + }; + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } } /** diff --git a/modules/clients/src/test/keystore/ca/node01.jks b/modules/clients/src/test/keystore/ca/node01.jks index 23c0643dd24b9..7dec684768fe7 100644 Binary files a/modules/clients/src/test/keystore/ca/node01.jks and b/modules/clients/src/test/keystore/ca/node01.jks differ diff --git a/modules/clients/src/test/keystore/ca/node02.jks b/modules/clients/src/test/keystore/ca/node02.jks index 26da4b5adf49c..985abae4b26f7 100644 Binary files a/modules/clients/src/test/keystore/ca/node02.jks and b/modules/clients/src/test/keystore/ca/node02.jks differ diff --git a/modules/clients/src/test/keystore/ca/node03.jks b/modules/clients/src/test/keystore/ca/node03.jks index 831ca2400b14b..9a6ab40693d93 100644 Binary files a/modules/clients/src/test/keystore/ca/node03.jks and b/modules/clients/src/test/keystore/ca/node03.jks differ diff --git a/modules/clients/src/test/keystore/ca/oneindex.txt b/modules/clients/src/test/keystore/ca/oneindex.txt index 8d347d072ef92..5d0e1c9163a0d 100644 --- a/modules/clients/src/test/keystore/ca/oneindex.txt +++ b/modules/clients/src/test/keystore/ca/oneindex.txt @@ -1 +1 @@ -V 180824104710Z 01 unknown /CN=node01 +V 210823155040Z 01 unknown /CN=node01 diff --git a/modules/clients/src/test/keystore/ca/twoindex.txt b/modules/clients/src/test/keystore/ca/twoindex.txt index 00b7307287709..1f9359d2d902e 100644 --- 
a/modules/clients/src/test/keystore/ca/twoindex.txt +++ b/modules/clients/src/test/keystore/ca/twoindex.txt @@ -1,2 +1,2 @@ -V 180824104716Z 01 unknown /CN=node02 -V 180824104719Z 02 unknown /CN=node03 +V 210823155541Z 01 unknown /CN=node02 +V 210823155835Z 02 unknown /CN=node03 diff --git a/modules/clients/src/test/resources/bulkload2_utf.csv b/modules/clients/src/test/resources/bulkload2_utf8.csv similarity index 100% rename from modules/clients/src/test/resources/bulkload2_utf.csv rename to modules/clients/src/test/resources/bulkload2_utf8.csv diff --git a/modules/clients/src/test/resources/bulkload2_windows1251.csv b/modules/clients/src/test/resources/bulkload2_windows1251.csv new file mode 100644 index 0000000000000..b0dcde05b4a77 --- /dev/null +++ b/modules/clients/src/test/resources/bulkload2_windows1251.csv @@ -0,0 +1,2 @@ +123,12,"123 123",123 +456,45,"456","456" \ No newline at end of file diff --git a/modules/cloud/pom.xml b/modules/cloud/pom.xml index 00ec33b78f833..d9db7fc025f65 100644 --- a/modules/cloud/pom.xml +++ b/modules/cloud/pom.xml @@ -29,7 +29,7 @@ ignite-cloud - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/codegen/pom.xml b/modules/codegen/pom.xml index 345f7130485dc..8668256df0281 100644 --- a/modules/codegen/pom.xml +++ b/modules/codegen/pom.xml @@ -32,7 +32,7 @@ ignite-codegen - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java index 99cf84946ab92..9ecc46a72713b 100644 --- a/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java +++ b/modules/codegen/src/main/java/org/apache/ignite/codegen/MessageCodeGenerator.java @@ -38,12 +38,19 @@ import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockRequest; 
+import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxPrepareRequest; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearGetRequest; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearLockRequest; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearSingleGetRequest; +import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxPrepareRequest; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.GridCodegenConverter; import org.apache.ignite.internal.GridDirectCollection; import org.apache.ignite.internal.GridDirectMap; import org.apache.ignite.internal.GridDirectTransient; import org.apache.ignite.internal.IgniteCodeGeneratingFail; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; @@ -115,6 +122,7 @@ public class MessageCodeGenerator { TYPES.put(BitSet.class, MessageCollectionItemType.BIT_SET); TYPES.put(UUID.class, MessageCollectionItemType.UUID); TYPES.put(IgniteUuid.class, MessageCollectionItemType.IGNITE_UUID); + TYPES.put(AffinityTopologyVersion.class, MessageCollectionItemType.AFFINITY_TOPOLOGY_VERSION); } /** @@ -168,25 +176,25 @@ public static void main(String[] args) throws Exception { // gen.generateAll(true); -// gen.generateAndWrite(GridChangeGlobalStateMessageResponse.class); - -// gen.generateAndWrite(GridNearAtomicUpdateRequest.class); +// gen.generateAndWrite(GridCacheMessage.class); // gen.generateAndWrite(GridMessageCollection.class); // gen.generateAndWrite(DataStreamerEntry.class); // gen.generateAndWrite(GridDistributedLockRequest.class); // gen.generateAndWrite(GridDistributedLockResponse.class); -// gen.generateAndWrite(GridNearLockRequest.class); // gen.generateAndWrite(GridNearLockResponse.class); -// 
gen.generateAndWrite(GridDhtLockRequest.class); + gen.generateAndWrite(GridNearLockRequest.class); + gen.generateAndWrite(GridDhtLockRequest.class); + gen.generateAndWrite(GridNearSingleGetRequest.class); + gen.generateAndWrite(GridNearGetRequest.class); + gen.generateAndWrite(GridDhtTxPrepareRequest.class); + gen.generateAndWrite(GridNearTxPrepareRequest.class); // gen.generateAndWrite(GridDhtLockResponse.class); // // gen.generateAndWrite(GridDistributedTxPrepareRequest.class); // gen.generateAndWrite(GridDistributedTxPrepareResponse.class); -// gen.generateAndWrite(GridNearTxPrepareRequest.class); // gen.generateAndWrite(GridNearTxPrepareResponse.class); -// gen.generateAndWrite(GridDhtTxPrepareRequest.class); // gen.generateAndWrite(GridDhtTxPrepareResponse.class); // // gen.generateAndWrite(GridDistributedTxFinishRequest.class); @@ -658,6 +666,8 @@ else if (type == UUID.class) returnFalseIfFailed(write, "writer.writeUuid", field, getExpr); else if (type == IgniteUuid.class) returnFalseIfFailed(write, "writer.writeIgniteUuid", field, getExpr); + else if (type == AffinityTopologyVersion.class) + returnFalseIfFailed(write, "writer.writeAffinityTopologyVersion", field, getExpr); else if (type.isEnum()) { String arg = getExpr + " != null ? 
(byte)" + getExpr + ".ordinal() : -1"; @@ -740,6 +750,8 @@ else if (type == UUID.class) returnFalseIfReadFailed(name, "reader.readUuid", setExpr, field); else if (type == IgniteUuid.class) returnFalseIfReadFailed(name, "reader.readIgniteUuid", setExpr, field); + else if (type == AffinityTopologyVersion.class) + returnFalseIfReadFailed(name, "reader.readAffinityTopologyVersion", setExpr, field); else if (type.isEnum()) { String loc = name + "Ord"; diff --git a/modules/compatibility/pom.xml b/modules/compatibility/pom.xml index a04d7348f59b5..0ce29de34e4a4 100644 --- a/modules/compatibility/pom.xml +++ b/modules/compatibility/pom.xml @@ -33,7 +33,7 @@ ignite-compatibility - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java index b36f563337a63..5a3740cae3fc0 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/DummyPersistenceCompatibilityTest.java @@ -67,6 +67,7 @@ public class DummyPersistenceCompatibilityTest extends IgnitePersistenceCompatib .setDefaultDataRegionConfiguration( new DataRegionConfiguration() .setPersistenceEnabled(true) + .setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE) )); cfg.setBinaryConfiguration( diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/MigratingToWalV2SerializerWithCompactionTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/MigratingToWalV2SerializerWithCompactionTest.java index d79790ecd7778..c72ce2fe96d9c 100644 --- 
a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/MigratingToWalV2SerializerWithCompactionTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/persistence/MigratingToWalV2SerializerWithCompactionTest.java @@ -66,7 +66,10 @@ public class MigratingToWalV2SerializerWithCompactionTest extends IgnitePersiste DataStorageConfiguration memCfg = new DataStorageConfiguration() .setDefaultDataRegionConfiguration( - new DataRegionConfiguration().setPersistenceEnabled(true)) + new DataRegionConfiguration() + .setPersistenceEnabled(true) + .setMaxSize(DataStorageConfiguration.DFLT_DATA_REGION_INITIAL_SIZE) + ) .setWalSegmentSize(WAL_SEGMENT_SIZE) .setWalCompactionEnabled(true) .setWalMode(WALMode.LOG_ONLY) diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java index 8202c1bd46049..8c34e2b599516 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/junits/IgniteCompatibilityAbstractTest.java @@ -138,7 +138,7 @@ protected IgniteEx startGrid(final String igniteInstanceName, final String ver, final IgniteConfiguration cfg = getConfiguration(igniteInstanceName); // stub - won't be used at node startup - IgniteProcessProxy ignite = new IgniteProcessProxy(cfg, log, locJvmInstance, true) { + IgniteProcessProxy ignite = new IgniteProcessProxy(cfg, log, locJvmInstance == null ? 
null: (x) -> locJvmInstance, true) { @Override protected IgniteLogger logger(IgniteLogger log, Object ctgr) { return ListenedGridTestLog4jLogger.createLogger(ctgr + "#" + ver.replaceAll("\\.", "_")); } diff --git a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java index 7eb3131a3fa01..768634cd0c6d4 100644 --- a/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java +++ b/modules/compatibility/src/test/java/org/apache/ignite/compatibility/testframework/util/MavenUtils.java @@ -22,12 +22,16 @@ import java.io.File; import java.io.InputStreamReader; import java.nio.file.Files; +import java.nio.file.Path; import java.nio.file.Paths; import java.util.concurrent.Callable; import java.util.concurrent.Executors; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; + +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -147,8 +151,22 @@ private static String defineMavenLocalRepositoryPath() throws Exception { private static void downloadArtifact(String artifact) throws Exception { X.println("Downloading artifact... Identifier: " + artifact); - exec(buildMvnCommand() + " org.apache.maven.plugins:maven-dependency-plugin:3.0.2:get -Dartifact=" + artifact + - (useGgRepo ? " -DremoteRepositories=" + GG_MVN_REPO : "")); + // Default platform independ path for maven settings file. 
+ Path localProxyMavenSettings = Paths.get(System.getProperty("user.home"), ".m2", "local-proxy.xml"); + + String localProxyMavenSettingsFromEnv = System.getenv("LOCAL_PROXY_MAVEN_SETTINGS"); + + SB mavenCommandArgs = new SB(" org.apache.maven.plugins:maven-dependency-plugin:3.0.2:get -Dartifact=" + artifact); + + if (!F.isEmpty(localProxyMavenSettingsFromEnv)) + localProxyMavenSettings = Paths.get(localProxyMavenSettingsFromEnv); + + if (Files.exists(localProxyMavenSettings)) + mavenCommandArgs.a(" -s " + localProxyMavenSettings.toString()); + else + mavenCommandArgs.a(useGgRepo ? " -DremoteRepositories=" + GG_MVN_REPO : ""); + + exec(buildMvnCommand() + mavenCommandArgs.toString()); X.println("Download is finished"); } diff --git a/modules/core/pom.xml b/modules/core/pom.xml index 68c19a7bada45..0d3436f9cd840 100644 --- a/modules/core/pom.xml +++ b/modules/core/pom.xml @@ -31,7 +31,7 @@ ignite-core - 2.5.0-SNAPSHOT + 2.5.6-SNAPSHOT http://ignite.apache.org @@ -84,7 +84,7 @@ commons-collections commons-collections - 3.2.2 + ${commons.collections.version} test @@ -126,7 +126,7 @@ org.mockito mockito-all - 1.9.5 + ${mockito.version} test @@ -443,23 +443,6 @@ test - - copy-domain - - copy - - - - - javax.cache - app-domain - ${javax.cache.tck.version} - ${domain-lib-dir} - ${domain-jar} - - - - diff --git a/modules/core/src/main/java/META-INF/NOTICE b/modules/core/src/main/java/META-INF/NOTICE index 4c99a05109185..0bae4345e6446 100644 --- a/modules/core/src/main/java/META-INF/NOTICE +++ b/modules/core/src/main/java/META-INF/NOTICE @@ -1,5 +1,5 @@ Apache Ignite -Copyright 2018 The Apache Software Foundation +Copyright 2019 The Apache Software Foundation This product includes software developed at The Apache Software Foundation (http://www.apache.org/). 
diff --git a/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java index dc48f115c4ec4..87a9e19919e00 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/DataRegionMetrics.java @@ -63,6 +63,18 @@ public interface DataRegionMetrics { */ public long getTotalAllocatedPages(); + /** + * Gets a total number of pages used for storing the data. It includes allocated pages except of empty + * pages that are not used yet or pages that can be reused. + *

+ * E. g. data region contains 1000 allocated pages, and 200 pages are used to store some data, this + * metric shows 200 used pages. Then the data was partially deleted and 50 pages were totally freed, + * hence this metric should show 150 used pages. + * + * @return Total number of used pages. + */ + public long getTotalUsedPages(); + /** * Gets a total size of memory allocated in the data region. When persistence is disabled, this * metric shows the total size of pages in memory. When persistence is enabled, this metric shows the @@ -146,11 +158,18 @@ public interface DataRegionMetrics { public long getPhysicalMemorySize(); /** - * Gets checkpoint buffer size in pages. + * Gets used checkpoint buffer size in pages. * * @return Checkpoint buffer size in pages. */ - public long getCheckpointBufferPages(); + public long getUsedCheckpointBufferPages(); + + /** + * Gets used checkpoint buffer size in bytes. + * + * @return Checkpoint buffer size in bytes. + */ + public long getUsedCheckpointBufferSize(); /** * Gets checkpoint buffer size in bytes. @@ -165,4 +184,39 @@ public interface DataRegionMetrics { * @return Page size in bytes. */ public int getPageSize(); + + /** + * The number of read pages from last restart. + * + * @return The number of read pages from last restart. + */ + public long getPagesRead(); + + /** + * The number of written pages from last restart. + * + * @return The number of written pages from last restart. + */ + public long getPagesWritten(); + + /** + * The number of replaced pages from last restart . + * + * @return The number of replaced pages from last restart . + */ + public long getPagesReplaced(); + + /** + * Total offheap size in bytes. + * + * @return Total offheap size in bytes. + */ + public long getOffHeapSize(); + + /** + * Total used offheap size in bytes. + * + * @return Total used offheap size in bytes. 
+ */ + public long getOffheapUsedSize(); } diff --git a/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsProvider.java b/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsProvider.java new file mode 100644 index 0000000000000..4a130391f96e1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/DataRegionMetricsProvider.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite; + + +/** + * This interface provides calculated metrics for data region. + */ +public interface DataRegionMetricsProvider { + /** + * Calculates free space of partially filled pages for this data region. It does not include + * empty data pages. + * + * @return free space in bytes. + */ + public long partiallyFilledPagesFreeSpace(); + + /** + * Calculates empty data pages count for region. It counts only totally free pages that + * can be reused (e. g. pages that are contained in reuse bucket of free list). + * + * @return empty data pages count. 
+ */ + public long emptyDataPages(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java index 680caba62b8f9..cdde0aced29d8 100644 --- a/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/DataStorageMetrics.java @@ -66,6 +66,27 @@ public interface DataStorageMetrics { */ public long getWalBuffPollSpinsRate(); + /** + * Total size in bytes for storage wal files. + * + * @return Total size in bytes for storage wal files. + */ + public long getWalTotalSize(); + + /** + * Time of the last WAL segment rollover. + * + * @return Time of the last WAL segment rollover. + */ + public long getWalLastRollOverTime(); + + /** + * Total checkpoint time from last restart. + * + * @return Total checkpoint time from last restart. + */ + public long getCheckpointTotalTime(); + /** * Gets the duration of the last checkpoint in milliseconds. * @@ -121,4 +142,74 @@ public interface DataStorageMetrics { * @return Total number of pages copied to a temporary checkpoint buffer during the last checkpoint. */ public long getLastCheckpointCopiedOnWritePagesNumber(); + + /** + * Total dirty pages for the next checkpoint. + * + * @return Total dirty pages for the next checkpoint. + */ + public long getDirtyPages(); + + /** + * The number of read pages from last restart. + * + * @return The number of read pages from last restart. + */ + public long getPagesRead(); + + /** + * The number of written pages from last restart. + * + * @return The number of written pages from last restart. + */ + public long getPagesWritten(); + + /** + * The number of replaced pages from last restart. + * + * @return The number of replaced pages from last restart. + */ + public long getPagesReplaced(); + + /** + * Total offheap size in bytes. + * + * @return Total offheap size in bytes. 
+ */ + public long getOffHeapSize(); + + /** + * Total used offheap size in bytes. + * + * @return Total used offheap size in bytes. + */ + public long getOffheapUsedSize(); + + /** + * Total size of memory allocated in bytes. + * + * @return Total size of memory allocated in bytes. + */ + public long getTotalAllocatedSize(); + + /** + * Gets used checkpoint buffer size in pages. + * + * @return Checkpoint buffer size in pages. + */ + public long getUsedCheckpointBufferPages(); + + /** + * Gets used checkpoint buffer size in bytes. + * + * @return Checkpoint buffer size in bytes. + */ + public long getUsedCheckpointBufferSize(); + + /** + * Checkpoint buffer size in bytes. + * + * @return Checkpoint buffer size in bytes. + */ + public long getCheckpointBufferSize(); } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteCache.java b/modules/core/src/main/java/org/apache/ignite/IgniteCache.java index cd8264bb05a3a..628f019787a54 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteCache.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteCache.java @@ -1504,7 +1504,7 @@ public IgniteFuture>> invokeAllAsync(Set lostPartitions(); @@ -1514,4 +1514,56 @@ public IgniteFuture>> invokeAllAsync(Set + * This is useful for fast iteration over cache partition data if persistence is enabled and the data is "cold". + *

+ * Preload will reduce available amount of page memory for subsequent operations and may lead to earlier page + * replacement. + *

+ * This method is irrelevant for in-memory caches. Calling this method on an in-memory cache will result in + * exception. + * + * @param partition Partition. + */ + public void preloadPartition(int partition); + + /** + * Efficiently preloads cache partition into page memory. + *

+ * This is useful for fast iteration over cache partition data if persistence is enabled and the data is "cold". + *

+ * Preload will reduce available amount of page memory for subsequent operations and may lead to earlier page + * replacement. + *

+ * This method is irrelevant for in-memory caches. Calling this method on an in-memory cache will result in + * exception. + * + * @param partition Partition. + * @return A future representing pending completion of the partition preloading. + */ + public IgniteFuture preloadPartitionAsync(int partition); + + /** + * Efficiently preloads cache partition into page memory if it exists on the local node. + *

+ * This is useful for fast iteration over cache partition data if persistence is enabled and the data is "cold". + *

+ * Preload will reduce available amount of page memory for subsequent operations and may lead to earlier page + * replacement. + *

+ * This method is irrelevant for in-memory caches. Calling this method on an in-memory cache will result in + * exception. + * + * @param partition Partition. + * @return {@code True} if partition was preloaded, {@code false} if it doesn't belong to local node. + */ + public boolean localPreloadPartition(int partition); } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteCacheRestartingException.java b/modules/core/src/main/java/org/apache/ignite/IgniteCacheRestartingException.java index a3a749070cec1..1dbfc67dc69dd 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteCacheRestartingException.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteCacheRestartingException.java @@ -18,6 +18,7 @@ package org.apache.ignite; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.lang.IgniteFuture; import org.jetbrains.annotations.Nullable; @@ -29,26 +30,34 @@ public class IgniteCacheRestartingException extends IgniteException { private static final long serialVersionUID = 0L; /** */ - private final IgniteFuture restartFut; + private final transient IgniteFuture restartFut; + + /** + * @param cacheName Error message. + */ + public IgniteCacheRestartingException(String cacheName) { + this(null, cacheName, null); + } /** * @param restartFut Restart future. - * @param msg Error message. + * @param cacheName Error message. */ - public IgniteCacheRestartingException(IgniteFuture restartFut, String msg) { - this(restartFut, msg, null); + public IgniteCacheRestartingException(IgniteFuture restartFut, String cacheName) { + this(restartFut, cacheName, null); } /** * @param restartFut Restart future. - * @param msg Error message. + * @param cacheName Cache name what is restarting. * @param cause Optional nested exception (can be {@code null}). 
*/ public IgniteCacheRestartingException( IgniteFuture restartFut, - String msg, - @Nullable Throwable cause) { - super(msg, cause); + String cacheName, + @Nullable Throwable cause + ) { + super("Cache is restarting:" + cacheName + ", you could wait restart completion with restartFuture", cause); this.restartFut = restartFut; } diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java b/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java index 7329d682ebea4..fc0e81bcae2ba 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteCluster.java @@ -420,6 +420,20 @@ public IgniteFuture> startNodesAsync(Collecti */ public void enableStatistics(Collection caches, boolean enabled); + /** + * Clear statistics for caches cluster wide. + * + * @param caches Collection of cache names. + */ + public void clearStatistics(Collection caches); + + /** + * Sets transaction timeout on partition map exchange. + * + * @param timeout Transaction timeout on partition map exchange in milliseconds. + */ + public void setTxTimeoutOnPartitionMapExchange(long timeout); + /** * If local client node disconnected from cluster returns future * that will be completed when client reconnected. diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java index 8195bd4302735..aed6f8653eaa1 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteJdbcDriver.java @@ -30,7 +30,6 @@ import org.apache.ignite.cache.affinity.AffinityKey; import org.apache.ignite.internal.jdbc.JdbcConnection; import org.apache.ignite.internal.jdbc.JdbcDriverPropertyInfo; -import org.apache.ignite.logger.java.JavaLogger; /** * JDBC driver implementation for In-Memory Data Grid. 
@@ -406,7 +405,7 @@ public class IgniteJdbcDriver implements Driver { private static final int MINOR_VER = 0; /** Logger. */ - private static final IgniteLogger LOG = new JavaLogger(); + private static final Logger LOG = Logger.getLogger(IgniteJdbcDriver.class.getName()); /* * Static initializer. diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteScheduler.java b/modules/core/src/main/java/org/apache/ignite/IgniteScheduler.java index 2e2553b95213d..e71a39362b868 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteScheduler.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteScheduler.java @@ -109,4 +109,4 @@ public interface IgniteScheduler { * @return Scheduled execution future. */ public SchedulerFuture scheduleLocal(Callable c, String ptrn); -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java index f7128c0ed1fc9..80795cb0cdc43 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteSystemProperties.java @@ -19,6 +19,7 @@ import java.io.Serializable; import java.lang.management.RuntimeMXBean; +import java.util.Arrays; import java.util.Iterator; import java.util.Map; import java.util.Properties; @@ -26,9 +27,12 @@ import org.apache.ignite.cache.CacheEntryProcessor; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.CheckpointWriteOrder; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.client.GridClient; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; +import org.apache.ignite.internal.processors.rest.GridRestCommand; import org.apache.ignite.stream.StreamTransformer; import 
org.jetbrains.annotations.Nullable; @@ -85,8 +89,12 @@ public final class IgniteSystemProperties { /** * If this system property is set to {@code false} - no checks for new versions will * be performed by Ignite. By default, Ignite periodically checks for the new - * version and prints out the message into the log if new version of Ignite is + * version and prints out the message into the log if a new version of Ignite is * available for download. + * + * Update notifier enabled flag is a cluster-wide value and determined according to the local setting + * during the start of the first node in the cluster. The chosen value will survive the first node shutdown + * and will override the property value on all newly joining nodes. */ public static final String IGNITE_UPDATE_NOTIFIER = "IGNITE_UPDATE_NOTIFIER"; @@ -117,9 +125,12 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_JETTY_LOG_NO_OVERRIDE = "IGNITE_JETTY_LOG_NO_OVERRIDE"; - /** This property allow rewriting default ({@code 30}) rest session expire time (in seconds). */ + /** This property allow rewriting default ({@code 30}) REST session expire time (in seconds). */ public static final String IGNITE_REST_SESSION_TIMEOUT = "IGNITE_REST_SESSION_TIMEOUT"; + /** This property allow rewriting default ({@code 300}) REST session security token expire time (in seconds). */ + public static final String IGNITE_REST_SECURITY_TOKEN_TIMEOUT = "IGNITE_REST_SECURITY_TOKEN_TIMEOUT"; + /** * This property allows to override maximum count of task results stored on one node * in REST processor. @@ -132,6 +143,15 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_REST_START_ON_CLIENT = "IGNITE_REST_START_ON_CLIENT"; + /** + * This property changes output format of {@link GridRestCommand#CACHE_GET_ALL} from {k: v, ...} + * to [{"key": k, "value": v}, ...] to allow non-string keys output. + * + * @deprecated Should be made default in Apache Ignite 3.0. 
+ */ + @Deprecated + public static final String IGNITE_REST_GETALL_AS_ARRAY = "IGNITE_REST_GETALL_AS_ARRAY"; + /** * This property defines the maximum number of attempts to remap near get to the same * primary node. Remapping may be needed when topology is changed concurrently with @@ -277,6 +297,9 @@ public final class IgniteSystemProperties { /** System property to hold SSH host for visor-started nodes. */ public static final String IGNITE_SSH_HOST = "IGNITE_SSH_HOST"; + /** System property to enable experimental commands in control.sh script. */ + public static final String IGNITE_ENABLE_EXPERIMENTAL_COMMAND = "IGNITE_ENABLE_EXPERIMENTAL_COMMAND"; + /** System property to hold SSH user name for visor-started nodes. */ public static final String IGNITE_SSH_USER_NAME = "IGNITE_SSH_USER_NAME"; @@ -422,6 +445,13 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_MBEANS_DISABLED = "IGNITE_MBEANS_DISABLED"; + /** + * If property is set to {@code true}, then test features will be enabled. + * + * Default is {@code false}. + */ + public static final String IGNITE_TEST_FEATURES_ENABLED = "IGNITE_TEST_FEATURES_ENABLED"; + /** * Property controlling size of buffer holding last exception. Default value of {@code 1000}. */ @@ -458,6 +488,15 @@ public final class IgniteSystemProperties { /** Force all SQL queries to be processed lazily regardless of what clients request. */ public static final String IGNITE_SQL_FORCE_LAZY_RESULT_SET = "IGNITE_SQL_FORCE_LAZY_RESULT_SET"; + /** Disable SQL system views. */ + public static final String IGNITE_SQL_DISABLE_SYSTEM_VIEWS = "IGNITE_SQL_DISABLE_SYSTEM_VIEWS"; + + /** SQL retry timeout. */ + public static final String IGNITE_SQL_RETRY_TIMEOUT = "IGNITE_SQL_RETRY_TIMEOUT"; + + /** Enable backward compatible handling of UUID through DDL. */ + public static final String IGNITE_SQL_UUID_DDL_BYTE_FORMAT = "IGNITE_SQL_UUID_DDL_BYTE_FORMAT"; + /** Maximum size for affinity assignment history. 
*/ public static final String IGNITE_AFFINITY_HISTORY_SIZE = "IGNITE_AFFINITY_HISTORY_SIZE"; @@ -468,6 +507,10 @@ public final class IgniteSystemProperties { public static final String IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE = "IGNITE_DISCOVERY_CLIENT_RECONNECT_HISTORY_SIZE"; + /** Time interval that indicates that client reconnect throttle must be reset to zero. 2 minutes by default. */ + public static final String CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL = + "CLIENT_THROTTLE_RECONNECT_RESET_TIMEOUT_INTERVAL"; + /** Number of cache operation retries in case of topology exceptions. */ public static final String IGNITE_CACHE_RETRIES_COUNT = "IGNITE_CACHE_RETRIES_COUNT"; @@ -746,8 +789,10 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_LOG_TX_RECORDS = "IGNITE_WAL_LOG_TX_RECORDS"; - /** If this property is set, {@link DataStorageConfiguration#writeThrottlingEnabled} will be overridden to true - * independent of initial value in configuration. */ + /** + * If this property is set, {@link DataStorageConfiguration#isWriteThrottlingEnabled()} + * will be overridden to {@code true} regardless the initial value in the configuration. + */ public static final String IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED = "IGNITE_OVERRIDE_WRITE_THROTTLING_ENABLED"; /** @@ -755,6 +800,9 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_SERIALIZER_VERSION = "IGNITE_WAL_SERIALIZER_VERSION"; + /** Property for setup Ignite WAL segment sync timeout. 
*/ + public static final String IGNITE_WAL_SEGMENT_SYNC_TIMEOUT = "IGNITE_WAL_SEGMENT_SYNC_TIMEOUT"; + /** * If the property is set Ignite will use legacy node comparator (based on node order) inste * @@ -827,6 +875,12 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER = "IGNITE_WAL_FSYNC_WITH_DEDICATED_WORKER"; + /** + * When set to {@code true}, on-heap cache cannot be enabled - see + * {@link CacheConfiguration#setOnheapCacheEnabled(boolean)}. + * Default is {@code false}. + */ + public static final String IGNITE_DISABLE_ONHEAP_CACHE = "IGNITE_DISABLE_ONHEAP_CACHE"; /** * When set to {@code false}, loaded pages implementation is switched to previous version of implementation, * FullPageIdTable. {@code True} value enables 'Robin Hood hashing: backward shift deletion'. @@ -834,6 +888,29 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP = "IGNITE_LOADED_PAGES_BACKWARD_SHIFT_MAP"; + /** + * Property for setup percentage of archive size for checkpoint trigger. Default value is 0.25 + */ + public static final String IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_CHECKPOINT_TRIGGER_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Property for setup percentage of WAL archive size to calculate threshold since which removing of old archive should be started. + * Default value is 0.5 + */ + public static final String IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE = "IGNITE_THRESHOLD_WAL_ARCHIVE_SIZE_PERCENTAGE"; + + /** + * Threshold time (in millis) to print warning to log if waiting for next wal segment took longer than the threshold. + * + * Default value is 1000 ms. + */ + public static final String IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT = "IGNITE_THRESHOLD_WAIT_TIME_NEXT_WAL_SEGMENT"; + + /** + * Count of WAL compressor worker threads. Default value is 4. 
+ */ + public static final String IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT = "IGNITE_WAL_COMPRESSOR_WORKER_THREAD_CNT"; + /** * Whenever read load balancing is enabled, that means 'get' requests will be distributed between primary and backup * nodes if it is possible and {@link CacheConfiguration#readFromBackup} is {@code true}. @@ -844,6 +921,278 @@ public final class IgniteSystemProperties { */ public static final String IGNITE_READ_LOAD_BALANCING = "IGNITE_READ_LOAD_BALANCING"; + /** + * Number of repetitions to capture a lock in the B+Tree. + */ + public static final String IGNITE_BPLUS_TREE_LOCK_RETRIES = "IGNITE_BPLUS_TREE_LOCK_RETRIES"; + + /** + * Amount of memory reserved in the heap at node start, which can be dropped to increase the chances of success when + * handling OutOfMemoryError. + * + * Default is {@code 64kb}. + */ + public static final String IGNITE_FAILURE_HANDLER_RESERVE_BUFFER_SIZE = "IGNITE_FAILURE_HANDLER_RESERVE_BUFFER_SIZE"; + + /** + * When set to {@code true}, WAL will be automatically disabled during rebalancing if there is no partition in + * OWNING state. + * Default is {@code false}. + */ + public static final String IGNITE_DISABLE_WAL_DURING_REBALANCING = "IGNITE_DISABLE_WAL_DURING_REBALANCING"; + + /** + * Sets timeout for TCP client recovery descriptor reservation. + */ + public static final String IGNITE_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT = + "IGNITE_NIO_RECOVERY_DESCRIPTOR_RESERVATION_TIMEOUT"; + + /** + * When set to {@code true}, Ignite will skip partitions sizes check on partition validation after rebalance has finished. + * Partitions sizes may differs on nodes when Expiry Policy is in use and it is ok due to lazy entry eviction mechanics. + * + * There is no need to disable partition size validation either in normal case or when expiry policy is configured for cache. + * But it should be disabled manually when policy is used on per entry basis to hint Ignite to skip this check. + * + * Default is {@code false}. 
+ */ + public static final String IGNITE_SKIP_PARTITION_SIZE_VALIDATION = "IGNITE_SKIP_PARTITION_SIZE_VALIDATION"; + + /** + * Enables threads dumping on critical node failure. + * + * Default is {@code true}. + */ + public static final String IGNITE_DUMP_THREADS_ON_FAILURE = "IGNITE_DUMP_THREADS_ON_FAILURE"; + + /** + * Throttling timeout in millis which avoid excessive PendingTree access on unwind if there is nothing to clean yet. + * + * Default is 500 ms. + */ + public static final String IGNITE_UNWIND_THROTTLING_TIMEOUT = "IGNITE_UNWIND_THROTTLING_TIMEOUT"; + + /** + * Threshold for throttling operations logging. + */ + public static final String IGNITE_THROTTLE_LOG_THRESHOLD = "IGNITE_THROTTLE_LOG_THRESHOLD"; + + /** + * Number of concurrent operation for evict partitions. + */ + public static final String IGNITE_EVICTION_PERMITS = "IGNITE_EVICTION_PERMITS"; + + /** + * Timeout between ZooKeeper client retries, default 2s. + */ + public static final String IGNITE_ZOOKEEPER_DISCOVERY_RETRY_TIMEOUT = "IGNITE_ZOOKEEPER_DISCOVERY_RETRY_TIMEOUT"; + + /** + * Number of attempts to reconnect to ZooKeeper. + */ + public static final String IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT = "IGNITE_ZOOKEEPER_DISCOVERY_MAX_RETRY_COUNT"; + + /** + * Try reuse memory on deactivation. Useful in case of huge page memory region size. + */ + public static final String IGNITE_REUSE_MEMORY_ON_DEACTIVATE = "IGNITE_REUSE_MEMORY_ON_DEACTIVATE"; + + /** + * Maximum inactivity period for system worker in milliseconds. When this value is exceeded, worker is considered + * blocked with consequent critical failure handler invocation. + */ + public static final String IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT = "IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT"; + + /** + * Timeout for checkpoint read lock acquisition in milliseconds. 
+ */ + public static final String IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT = "IGNITE_CHECKPOINT_READ_LOCK_TIMEOUT"; + + /** + * Timeout for waiting schema update if schema was not found for last accepted version. + */ + public static final String IGNITE_WAIT_SCHEMA_UPDATE = "IGNITE_WAIT_SCHEMA_UPDATE"; + + /** + * System property to override {@link CacheConfiguration#getRebalanceThrottle} configuration property for all caches. + * {@code 0} by default, which means that override is disabled. + * @deprecated Use {@link IgniteConfiguration#getRebalanceThrottle()} instead. + */ + @Deprecated + public static final String IGNITE_REBALANCE_THROTTLE_OVERRIDE = "IGNITE_REBALANCE_THROTTLE_OVERRIDE"; + + /** + * Enables start caches in parallel. + * + * Default is {@code true}. + */ + public static final String IGNITE_ALLOW_START_CACHES_IN_PARALLEL = "IGNITE_ALLOW_START_CACHES_IN_PARALLEL"; + + /** + * Disables cache interceptor triggering in case of conflicts. + * + * Default is {@code false}. + */ + public static final String IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT = "IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT"; + + /** + * When set to {@code true}, cache metrics are not included into the discovery metrics update message (in this + * case message contains only cluster metrics). By default cache metrics are included into the message and + * calculated each time the message is sent. + *

+ * Cache metrics sending can also be turned off by disabling statistics per each cache, but in this case some cache + * metrics will be unavailable via JMX too. + */ + public static final String IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE = "IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE"; + + /** + * Size threshold to allocate and retain additional HashMap to improve contains() + * which leads to extra memory consumption. + */ + public static final String IGNITE_AFFINITY_BACKUPS_THRESHOLD = "IGNITE_AFFINITY_BACKUPS_THRESHOLD"; + + /** + * Flag to disable memory optimization: + * BitSets instead of HashSets to store partitions. + * When number of backups per partion is > IGNITE_AFFINITY_BACKUPS_THRESHOLD we use HashMap to improve contains() + * which leads to extra memory consumption, otherwise we use view on the + * list of cluster nodes to reduce memory consumption on redundant data structures. + */ + public static final String IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION = "IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION"; + + /** + * Limit the maximum number of objects in memory during the recovery procedure. + */ + public static final String IGNITE_RECOVERY_SEMAPHORE_PERMITS = "IGNITE_RECOVERY_SEMAPHORE_PERMITS"; + + /** + * Maximum size of history of server nodes (server node IDs) that ever joined to current topology. + */ + public static final String IGNITE_NODE_IDS_HISTORY_SIZE = "IGNITE_NODE_IDS_HISTORY_SIZE"; + + /** + * Maximum number of diagnostic warning messages per category, when waiting for PME. + */ + public static final String IGNITE_DIAGNOSTIC_WARN_LIMIT = "IGNITE_DIAGNOSTIC_WARN_LIMIT"; + + /** + * Flag to enable triggering failure handler for node if unrecoverable partition inconsistency is + * discovered during partition update counters exchange. + */ + public static final String IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY = + "IGNITE_FAIL_NODE_ON_UNRECOVERABLE_PARTITION_INCONSISTENCY"; + + /** + * Enforces singleton. 
+ */ + public static final String IGNITE_QUERY_LAZY_DEFAULT = "IGNITE_QUERY_LAZY_DEFAULT"; + + /** + * Starting from this number of dirty pages in checkpoint, array will be sorted with + * {@link Arrays#parallelSort(Comparable[])} in case of {@link CheckpointWriteOrder#SEQUENTIAL}. + */ + public static final String CHECKPOINT_PARALLEL_SORT_THRESHOLD = "CHECKPOINT_PARALLEL_SORT_THRESHOLD"; + + /** + * Keep static cache configuration even if stored cache data differs from the static config. When this property + * is set, static cache configuration will override persisted configuration. DDL operations are not allowed + * when this system property is set. + */ + public static final String IGNITE_KEEP_STATIC_CACHE_CONFIGURATION = "IGNITE_KEEP_STATIC_CACHE_CONFIGURATION"; + + /** + * Shows if dump requests from local node to near node are allowed, when long running transaction + * is found. If allowed, the compute request to near node will be made to get thread dump of transaction + * owner thread. + */ + public static final String IGNITE_TX_OWNER_DUMP_REQUESTS_ALLOWED = "IGNITE_TX_OWNER_DUMP_REQUESTS_ALLOWED"; + + /** + * Page lock tracker type. + * -1 - Disable lock tracking. + * 1 - HEAP_STACK + * 2 - HEAP_LOG + * 3 - OFF_HEAP_STACK + * 4 - OFF_HEAP_LOG + * + * Default is 2 - HEAP_LOG. + */ + public static final String IGNITE_PAGE_LOCK_TRACKER_TYPE = "IGNITE_PAGE_LOCK_TRACKER_TYPE"; + + /** + * Capacity in pages for storing in page lock tracker strucuture. + * + * Default is 512 pages. + */ + public static final String IGNITE_PAGE_LOCK_TRACKER_CAPACITY = "IGNITE_PAGE_LOCK_TRACKER_CAPACITY"; + + /** + * Page lock tracker thread for checking hangs threads interval. + * + * Default is 60_000 ms. + */ + public static final String IGNITE_PAGE_LOCK_TRACKER_CHECK_INTERVAL = "IGNITE_PAGE_LOCK_TRACKER_CHECK_INTERVAL"; + + /** + * Enables threads locks dumping on critical node failure. + * + * Default is {@code true}. 
+ */ + public static final String IGNITE_DUMP_PAGE_LOCK_ON_FAILURE = "IGNITE_DUMP_PAGE_LOCK_ON_FAILURE"; + + /** + * Scan the classpath on startup and log all the files containing in it. + */ + public static final String IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP = "IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP"; + + /** + * Index rebuilding parallelism level. If specified, sets the count of threads that are used for index rebuilding + * and can only be greater than 0, otherwise default value will be used. Maximum count of threads + * can't be greater than total available processors count. + * Default value is minimum of 4 and processors count / 4, but always greater than 0. + */ + public static final String INDEX_REBUILDING_PARALLELISM = "INDEX_REBUILDING_PARALLELISM"; + + /** Enable write rebalance statistics into log. Default: false */ + public static final String IGNITE_WRITE_REBALANCE_STATISTICS = "IGNITE_WRITE_REBALANCE_STATISTICS"; + + /** Enable write rebalance statistics by partitions into log. Default: false */ + public static final String IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS = + "IGNITE_WRITE_REBALANCE_PARTITION_STATISTICS"; + + /** + * Threshold timeout for long transactions, if transaction exceeds it, it will be dumped in log with + * information about how much time it spent in system time (time while acquiring locks, preparing, + * committing, etc) and user time (time when client node runs some code while holding transaction and not + * waiting it). Equals 0 if not set. No long transactions are dumped in log if neither this parameter + * nor {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is set. + */ + public static final String IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD = "IGNITE_LONG_TRANSACTION_TIME_DUMP_THRESHOLD"; + + /** + * The coefficient for samples of completed transactions that will be dumped in log. Must be float value + * between 0.0 and 1.0 inclusive. Default value is 0.0. 
+ */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT"; + + /** + * The limit of samples of completed transactions that will be dumped in log per second, if + * {@link #IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_COEFFICIENT} is above 0.0. Must be integer value + * greater than 0. Default value is 5. + */ + public static final String IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT = + "IGNITE_TRANSACTION_TIME_DUMP_SAMPLES_PER_SECOND_LIMIT"; + + /** + * Disables smart DR throttling. Default value is false. + */ + public static final String IGNITE_DISABLE_SMART_DR_THROTTLING = + "IGNITE_DISABLE_SMART_DR_THROTTLING"; + + /** */ + public static final String IGNITE_USE_POOL_FOR_LAZY_QUERIES = "IGNITE_USE_POOL_FOR_LAZY_QUERIES"; + /** * Enforces singleton. */ @@ -909,7 +1258,7 @@ public static boolean getBoolean(String name) { public static boolean getBoolean(String name, boolean dflt) { String val = getString(name); - return val == null ? dflt : Boolean.valueOf(val); + return val == null ? dflt : Boolean.parseBoolean(val); } /** @@ -917,7 +1266,7 @@ public static boolean getBoolean(String name, boolean dflt) { * The result is transformed to {@code int} using {@code Integer.parseInt()} method. * * @param name Name of the system property or environment variable. - * @param dflt Default value + * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. @@ -945,7 +1294,7 @@ public static int getInteger(String name, int dflt) { * The result is transformed to {@code float} using {@code Float.parseFloat()} method. * * @param name Name of the system property or environment variable. - * @param dflt Default value + * @param dflt Default value. * @return Float value of the system property or environment variable. 
* Returns default value in case neither system property * nor environment variable with given name is found. @@ -973,7 +1322,7 @@ public static float getFloat(String name, float dflt) { * The result is transformed to {@code long} using {@code Long.parseLong()} method. * * @param name Name of the system property or environment variable. - * @param dflt Default value + * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. @@ -1001,7 +1350,7 @@ public static long getLong(String name, long dflt) { * The result is transformed to {@code double} using {@code Double.parseDouble()} method. * * @param name Name of the system property or environment variable. - * @param dflt Default value + * @param dflt Default value. * @return Integer value of the system property or environment variable. * Returns default value in case neither system property * nor environment variable with given name is found. diff --git a/modules/core/src/main/java/org/apache/ignite/IgniteTransactions.java b/modules/core/src/main/java/org/apache/ignite/IgniteTransactions.java index dfe6a1a98c5ea..2bb7101a71928 100644 --- a/modules/core/src/main/java/org/apache/ignite/IgniteTransactions.java +++ b/modules/core/src/main/java/org/apache/ignite/IgniteTransactions.java @@ -17,6 +17,7 @@ package org.apache.ignite; +import java.util.Collection; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.transactions.Transaction; @@ -103,4 +104,24 @@ public Transaction txStart(TransactionConcurrency concurrency, TransactionIsolat * Resets transaction metrics. */ public void resetMetrics(); -} \ No newline at end of file + + /** + * Returns a list of active transactions initiated by this node. + *

+ * Note: returned transaction handle will only support getters, {@link Transaction#close()}, + * {@link Transaction#rollback()}, {@link Transaction#rollbackAsync()} methods. + * Trying to invoke other methods will lead to UnsupportedOperationException. + * + * @return Transactions started on local node. + */ + public Collection localActiveTransactions(); + + /** + * Returns instance of Ignite Transactions to mark a transaction with a special label. + * + * @param lb label. + * @return {@code This} for chaining. + * @throws NullPointerException if label is null. + */ + public IgniteTransactions withLabel(String lb); +} diff --git a/modules/core/src/main/java/org/apache/ignite/Ignition.java b/modules/core/src/main/java/org/apache/ignite/Ignition.java index 835896e503c2a..36bcab2066d56 100644 --- a/modules/core/src/main/java/org/apache/ignite/Ignition.java +++ b/modules/core/src/main/java/org/apache/ignite/Ignition.java @@ -141,30 +141,26 @@ public static boolean isDaemon() { } /** - * Sets client mode static flag. + * Sets client mode thread-local flag. *

* This flag used when node is started if {@link IgniteConfiguration#isClientMode()} * is {@code null}. When {@link IgniteConfiguration#isClientMode()} is set this flag is ignored. - * It is recommended to use {@link DiscoverySpi} in client mode too. * * @param clientMode Client mode flag. * @see IgniteConfiguration#isClientMode() - * @see TcpDiscoverySpi#setForceServerMode(boolean) */ public static void setClientMode(boolean clientMode) { IgnitionEx.setClientMode(clientMode); } /** - * Gets client mode static flag. + * Gets client mode thread-local flag. *

* This flag used when node is started if {@link IgniteConfiguration#isClientMode()} * is {@code null}. When {@link IgniteConfiguration#isClientMode()} is set this flag is ignored. - * It is recommended to use {@link DiscoverySpi} in client mode too. * * @return Client mode flag. * @see IgniteConfiguration#isClientMode() - * @see TcpDiscoverySpi#setForceServerMode(boolean) */ public static boolean isClientMode() { return IgnitionEx.isClientMode(); @@ -589,4 +585,4 @@ public static IgniteClient startClient(ClientConfiguration cfg) throws ClientExc return TcpIgniteClient.start(cfg); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheManager.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheManager.java index b572dd5cb3e00..b3767553fdbbe 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheManager.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheManager.java @@ -219,17 +219,7 @@ public CacheManager(URI uri, CachingProvider cachingProvider, ClassLoader clsLdr kernalGateway.readLock(); try { - IgniteCache cache = getCache0(cacheName); - - if (cache != null) { - if(cache.getConfiguration(Configuration.class).getKeyType() != Object.class) - throw new IllegalArgumentException(); - - if(cache.getConfiguration(Configuration.class).getValueType() != Object.class) - throw new IllegalArgumentException(); - } - - return cache; + return getCache0(cacheName); } finally { kernalGateway.readUnlock(); @@ -258,8 +248,7 @@ public CacheManager(URI uri, CachingProvider cachingProvider, ClassLoader clsLdr try { if (kernalGateway.getState() != GridKernalState.STARTED) - return Collections.emptySet(); // javadoc of #getCacheNames() says that IllegalStateException should be - // thrown but CacheManagerTest.close_cachesEmpty() require empty collection. 
+ throw new IllegalStateException(); Collection res = new ArrayList<>(); diff --git a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java index 0b1cb87c3a301..951b48bd1f678 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/CacheMetrics.java @@ -240,14 +240,25 @@ public interface CacheMetrics { * Gets number of non-{@code null} values in the cache. * * @return Number of non-{@code null} values in the cache. + * @deprecated Can overflow. Use {@link CacheMetrics#getCacheSize()} instead. */ + @Deprecated public int getSize(); + /** + * Cache size. + * + * @return Cache size. + */ + public long getCacheSize(); + /** * Gets number of keys in the cache, possibly with {@code null} values. * * @return Number of keys in the cache. + * @deprecated Can overflow. Use {@link CacheMetrics#getCacheSize()} instead. */ + @Deprecated public int getKeySize(); /** @@ -490,6 +501,16 @@ public interface CacheMetrics { */ public int getRebalancingPartitionsCount(); + /** + * @return Number of already rebalanced keys. + */ + public long getRebalancedKeys(); + + /** + * @return Number estimated to rebalance keys. + */ + public long getEstimatedRebalancingKeys(); + /** * @return Estimated number of keys to be rebalanced on current node. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java index 0065bae959d73..e20224746991b 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntity.java @@ -26,9 +26,11 @@ import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; +import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.UUID; import javax.cache.CacheException; import org.apache.ignite.cache.query.annotations.QueryGroupIndex; import org.apache.ignite.cache.query.annotations.QuerySqlField; @@ -36,12 +38,17 @@ import org.apache.ignite.internal.processors.cache.query.QueryEntityClassProperty; import org.apache.ignite.internal.processors.cache.query.QueryEntityTypeDescriptor; import org.apache.ignite.internal.processors.query.GridQueryIndexDescriptor; +import org.apache.ignite.internal.processors.query.QueryField; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.processors.query.schema.operation.SchemaAbstractOperation; +import org.apache.ignite.internal.processors.query.schema.operation.SchemaAlterTableAddColumnOperation; +import org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexCreateOperation; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** @@ -143,6 +150,172 @@ public QueryEntity(Class keyCls, Class valCls) { this(convert(processKeyAndValueClasses(keyCls, valCls))); } + /** + * Make query entity patch. 
This patch can only add properties to entity and can't remove them. + * Other words, the patch will contain only add operations(e.g. add column, create index) and not remove ones. + * + * @param target Query entity to which this entity should be expanded. + * @return Patch which contains operations for expanding this entity. + */ + @NotNull public QueryEntityPatch makePatch(QueryEntity target) { + if (target == null) + return QueryEntityPatch.empty(); + + StringBuilder conflicts = new StringBuilder(); + + checkEquals(conflicts, "keyType", keyType, target.keyType); + checkEquals(conflicts, "valType", valType, target.valType); + checkEquals(conflicts, "keyFieldName", keyFieldName, target.keyFieldName); + checkEquals(conflicts, "valueFieldName", valueFieldName, target.valueFieldName); + checkEquals(conflicts, "tableName", tableName, target.tableName); + + List queryFieldsToAdd = checkFields(target, conflicts); + + Collection indexesToAdd = checkIndexes(target, conflicts); + + if (conflicts.length() != 0) + return QueryEntityPatch.conflict(tableName + " conflict: \n" + conflicts.toString()); + + Collection patchOperations = new ArrayList<>(); + + if (!queryFieldsToAdd.isEmpty()) + patchOperations.add(new SchemaAlterTableAddColumnOperation( + UUID.randomUUID(), + null, + null, + tableName, + queryFieldsToAdd, + true, + true + )); + + if (!indexesToAdd.isEmpty()) { + for (QueryIndex index : indexesToAdd) { + patchOperations.add(new SchemaIndexCreateOperation( + UUID.randomUUID(), + null, + null, + tableName, + index, + true, + 0 + )); + } + } + + return QueryEntityPatch.patch(patchOperations); + } + + /** + * Comparing local fields and target fields. + * + * @param target Query entity for check. + * @param conflicts Storage of conflicts. + * @return Indexes which exist in target and not exist in local. 
+ */ + @NotNull private Collection checkIndexes(QueryEntity target, StringBuilder conflicts) { + HashSet indexesToAdd = new HashSet<>(); + + Map currentIndexes = new HashMap<>(); + + for (QueryIndex index : getIndexes()) { + if (currentIndexes.put(index.getName(), index) != null) + throw new IllegalStateException("Duplicate key"); + } + + for (QueryIndex queryIndex : target.getIndexes()) { + if(currentIndexes.containsKey(queryIndex.getName())) { + checkEquals( + conflicts, + "index " + queryIndex.getName(), + currentIndexes.get(queryIndex.getName()), + queryIndex + ); + } + else + indexesToAdd.add(queryIndex); + } + return indexesToAdd; + } + + /** + * Comparing local entity fields and target entity fields. + * + * @param target Query entity for check. + * @param conflicts Storage of conflicts. + * @return Fields which exist in target and not exist in local. + */ + private List checkFields(QueryEntity target, StringBuilder conflicts) { + List queryFieldsToAdd = new ArrayList<>(); + + for (Map.Entry targetField : target.getFields().entrySet()) { + String targetFieldName = targetField.getKey(); + String targetFieldType = targetField.getValue(); + + if (getFields().containsKey(targetFieldName)) { + checkEquals( + conflicts, + "fieldType of " + targetFieldName, + getFields().get(targetFieldName), + targetFieldType + ); + + checkEquals( + conflicts, + "nullable of " + targetFieldName, + contains(getNotNullFields(), targetFieldName), + contains(target.getNotNullFields(), targetFieldName) + ); + + checkEquals( + conflicts, + "default value of " + targetFieldName, + getFromMap(getDefaultFieldValues(), targetFieldName), + getFromMap(target.getDefaultFieldValues(), targetFieldName) + ); + } + else { + queryFieldsToAdd.add(new QueryField( + targetFieldName, + targetFieldType, + !contains(target.getNotNullFields(),targetFieldName), + getFromMap(target.getDefaultFieldValues(), targetFieldName) + )); + } + } + + return queryFieldsToAdd; + } + + /** + * @param collection 
Collection for checking. + * @param elementToCheck Element for checking to containing in collection. + * @return {@code true} if collection contain elementToCheck. + */ + private static boolean contains(Collection collection, String elementToCheck) { + return collection != null && collection.contains(elementToCheck); + } + + /** + * @return Value from sourceMap or null if map is null. + */ + private static Object getFromMap(Map sourceMap, String key) { + return sourceMap == null ? null : sourceMap.get(key); + } + + /** + * Comparing two objects and add formatted text to conflicts if needed. + * + * @param conflicts Storage of conflicts resulting error message. + * @param name Name of comparing object. + * @param local Local object. + * @param received Received object. + */ + private void checkEquals(StringBuilder conflicts, String name, Object local, Object received) { + if (!Objects.equals(local, received)) + conflicts.append(String.format("%s is different: local=%s, received=%s\n", name, local, received)); + } + /** * Gets key type for this query pair. * @@ -310,7 +483,7 @@ public QueryEntity setValueFieldName(String valueFieldName) { * * @return Collection of index entities. */ - public Collection getIndexes() { + @NotNull public Collection getIndexes() { return idxs == null ? Collections.emptyList() : idxs; } @@ -513,13 +686,10 @@ private static QueryEntity convert(QueryEntityTypeDescriptor desc) { * @return Type descriptor. 
*/ private static QueryEntityTypeDescriptor processKeyAndValueClasses( - Class keyCls, - Class valCls + @NotNull Class keyCls, + @NotNull Class valCls ) { - QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(); - - d.keyClass(keyCls); - d.valueClass(valCls); + QueryEntityTypeDescriptor d = new QueryEntityTypeDescriptor(keyCls, valCls); processAnnotationsInClass(true, d.keyClass(), d, null); processAnnotationsInClass(false, d.valueClass(), d, null); diff --git a/modules/core/src/main/java/org/apache/ignite/cache/QueryEntityPatch.java b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntityPatch.java new file mode 100644 index 0000000000000..38e1b2acdef28 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/QueryEntityPatch.java @@ -0,0 +1,118 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache; + +import java.util.Collection; +import java.util.Objects; +import org.apache.ignite.internal.processors.query.schema.operation.SchemaAbstractOperation; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Query entity patch which contain {@link SchemaAbstractOperation} operations for changing query entity. 
+ * This patch can only add properties to entity and can't remove them. + * Other words, the patch will contain only add operations + * (e.g.: + * {@link org.apache.ignite.internal.processors.query.schema.operation.SchemaAlterTableAddColumnOperation}, + * {@link org.apache.ignite.internal.processors.query.schema.operation.SchemaIndexCreateOperation} + * ) and not remove ones. + * + * It contain only add operation because at the moment we don't have history of schema operations + * and by current state we can't understand some property was already deleted or it has not been added yet. + */ +public class QueryEntityPatch { + /** Empty query entity patch. */ + private static final QueryEntityPatch EMPTY_QUERY_ENTITY_PATCH = new QueryEntityPatch(null, null); + + /** Message which described conflicts during creating this patch. */ + private String conflictsMessage; + + /** Operations for modification query entity. */ + private Collection patchOperations; + + /** + * Create patch. + */ + private QueryEntityPatch(String conflictsMessage, Collection patchOperations) { + this.conflictsMessage = conflictsMessage; + this.patchOperations = patchOperations; + } + + /** + * Builder method for patch with conflicts. + * + * @param conflicts Conflicts. + * @return Query entity patch with conflicts. + */ + public static QueryEntityPatch conflict(String conflicts) { + return new QueryEntityPatch(conflicts, null); + } + + /** + * Builder method for empty patch. + * + * @return Query entity patch. + */ + public static QueryEntityPatch empty() { + return EMPTY_QUERY_ENTITY_PATCH; + } + + /** + * Builder method for patch with operations. + * + * @param patchOperations Operations for modification. + * @return Query entity patch which contain {@link SchemaAbstractOperation} operations for changing query entity. + */ + public static QueryEntityPatch patch(Collection patchOperations) { + return new QueryEntityPatch(null, patchOperations); + } + + /** + * Check for conflict in this patch. 
+ * + * @return {@code true} if patch has conflict. + */ + public boolean hasConflict() { + return conflictsMessage != null; + } + + /** + * @return {@code true} if patch is empty and can't be applying. + */ + public boolean isEmpty() { + return patchOperations == null || patchOperations.isEmpty(); + } + + /** + * @return Conflicts. + */ + public String getConflictsMessage() { + return conflictsMessage; + } + + /** + * @return Patch operations for applying. + */ + public Collection getPatchOperations() { + return patchOperations; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(QueryEntityPatch.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java index 0e9afb79f2a9d..4f6be40306176 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/affinity/rendezvous/RendezvousAffinityFunction.java @@ -388,9 +388,7 @@ else if ((backupFilter != null && backupFilter.apply(primary, node)) if (!exclNeighborsWarn) { LT.warn(log, "Affinity function excludeNeighbors property is ignored " + - "because topology has no enough nodes to assign backups.", - "Affinity function excludeNeighbors property is ignored " + - "because topology has no enough nodes to assign backups."); + "because topology has no enough nodes to assign backups."); exclNeighborsWarn = true; } diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicy.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicy.java index d58077f5b6e0c..bd1c4ca616bc2 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicy.java +++ 
b/modules/core/src/main/java/org/apache/ignite/cache/eviction/AbstractEvictionPolicy.java @@ -28,6 +28,9 @@ * Common functionality implementation for eviction policies with max size/max memory and batch eviction support. */ public abstract class AbstractEvictionPolicy implements EvictionPolicy, Externalizable { + /** */ + private static final long serialVersionUID = 4358725333474509598L; + /** Max memory size occupied by elements in container. */ private volatile long maxMemSize; diff --git a/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java b/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java index 8f7fbc5c7b95d..ff873aca40117 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/eviction/lru/LruEvictionPolicyFactory.java @@ -68,5 +68,4 @@ public LruEvictionPolicyFactory(int maxSize, int batchSize, long maxMemSize) { return policy; } - } diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java index 9a8fbca3bb281..b0ec17016a60d 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/ContinuousQuery.java @@ -30,22 +30,22 @@ /** * API for configuring continuous cache queries. *

- * Continuous queries allow to register a remote filter and a local listener + * Continuous queries allow registering a remote filter and a local listener * for cache updates. If an update event passes the filter, it will be sent to - * the node that executed the query and local listener will be notified. + * the node that executed the query, and local listener will be notified. *

- * Additionally, you can execute initial query to get currently existing data. + * Additionally, you can execute an initial query to get currently existing data. * Query can be of any type (SQL, TEXT or SCAN) and can be set via {@link #setInitialQuery(Query)} * method. *

* Query can be executed either on all nodes in topology using {@link IgniteCache#query(Query)} * method, or only on the local node, if {@link Query#setLocal(boolean)} parameter is set to {@code true}. - * Note that in case query is distributed and a new node joins, it will get the remote - * filter for the query during discovery process before it actually joins topology, + * Note that if the query is distributed and a new node joins, it will get the remote + * filter for the query during discovery process before it actually joins a topology, * so no updates will be missed. *

Example

- * As an example, suppose we have cache with {@code 'Person'} objects and we need - * to query all persons with salary above 1000. + * As an example, suppose we have a cache with {@code 'Person'} objects and we need + * to query for all people with salary above 1000. *

* Here is the {@code Person} class: *

@@ -60,17 +60,17 @@
  * }
  * 
*

- * You can create and execute continuous query like so: + * You can create and execute a continuous query like so: *

- * // Create new continuous query.
+ * // Create a new continuous query.
  * ContinuousQuery<Long, Person> qry = new ContinuousQuery<>();
  *
- * // Initial iteration query will return all persons with salary above 1000.
+ * // Initial iteration query will return all people with salary above 1000.
  * qry.setInitialQuery(new ScanQuery<>((id, p) -> p.getSalary() > 1000));
  *
  *
  * // Callback that is called locally when update notifications are received.
- * // It simply prints out information about all created persons.
+ * // It simply prints out information about all created or modified records.
  * qry.setLocalListener((evts) -> {
  *     for (CacheEntryEvent<? extends Long, ? extends Person> e : evts) {
  *         Person p = e.getValue();
@@ -79,29 +79,29 @@
  *     }
  * });
  *
- * // Continuous listener will be notified for persons with salary above 1000.
+ * // The continuous listener will be notified for people with salary above 1000.
  * qry.setRemoteFilter(evt -> evt.getValue().getSalary() > 1000);
  *
- * // Execute query and get cursor that iterates through initial data.
+ * // Execute the query and get a cursor that iterates through the initial data.
  * QueryCursor<Cache.Entry<Long, Person>> cur = cache.query(qry);
  * 
- * This will execute query on all nodes that have cache you are working with and - * listener will start to receive notifications for cache updates. + * This will execute query on all nodes that have the cache you are working with and + * listener will start receiving notifications for cache updates. *

* To stop receiving updates call {@link QueryCursor#close()} method: *

  * cur.close();
  * 
- * Note that this works even if you didn't provide initial query. Cursor will + * Note that this works even if you didn't provide the initial query. Cursor will * be empty in this case, but it will still unregister listeners when {@link QueryCursor#close()} * is called. *

* {@link IgniteAsyncCallback} annotation is supported for {@link CacheEntryEventFilter} * (see {@link #setRemoteFilterFactory(Factory)}) and {@link CacheEntryUpdatedListener} * (see {@link #setLocalListener(CacheEntryUpdatedListener)}). - * If filter and/or listener are annotated with {@link IgniteAsyncCallback} then annotated callback - * is executed in async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) - * and notification order is kept the same as update order for given cache key. + * If a filter and/or listener are annotated with {@link IgniteAsyncCallback} then the annotated callback + * is executed in an async callback pool (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) + * and a notification order is kept the same as an update order for a given cache key. * * @see ContinuousQueryWithTransformer * @see IgniteAsyncCallback @@ -130,10 +130,10 @@ public ContinuousQuery setInitialQuery(Query> initQry) { } /** - * Sets local callback. This callback is called only in local node when new updates are received. + * Sets a local callback. This callback is called only on local node when new updates are received. *

- * The callback predicate accepts ID of the node from where updates are received and collection - * of received entries. Note that for removed entries value will be {@code null}. + * The callback predicate accepts ID of the node from where updates are received and a collection + * of the received entries. Note that for removed entries values will be {@code null}. *

* If the predicate returns {@code false}, query execution will be cancelled. *

@@ -141,7 +141,7 @@ public ContinuousQuery setInitialQuery(Query> initQry) { * synchronization or transactional cache operations), should be executed asynchronously without * blocking the thread that called the callback. Otherwise, you can get deadlocks. *

- * If local listener are annotated with {@link IgniteAsyncCallback} then it is executed in async callback pool + * If local listener are annotated with {@link IgniteAsyncCallback} then it is executed in an async callback pool * (see {@link IgniteConfiguration#getAsyncCallbackPoolSize()}) that allow to perform a cache operations. * * @param locLsnr Local callback. @@ -157,8 +157,6 @@ public ContinuousQuery setLocalListener(CacheEntryUpdatedListener lo } /** - * Gets local listener. - * * @return Local listener. */ public CacheEntryUpdatedListener getLocalListener() { @@ -213,7 +211,16 @@ public ContinuousQuery setAutoUnsubscribe(boolean autoUnsubscribe) { return (ContinuousQuery)super.setPageSize(pageSize); } - /** {@inheritDoc} */ + /** + * Sets whether this query should be executed on a local node only. + * + * Note: backup event queues are not kept for local continuous queries. It may lead to loss of notifications in case + * of node failures. Use {@link ContinuousQuery#setRemoteFilterFactory(Factory)} to register cache event listeners + * on all cache nodes, if delivery guarantee is required. + * + * @param loc Local flag. + * @return {@code this} for chaining. + */ @Override public ContinuousQuery setLocal(boolean loc) { return (ContinuousQuery)super.setLocal(loc); } diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/QueryRetryException.java b/modules/core/src/main/java/org/apache/ignite/cache/query/QueryRetryException.java new file mode 100644 index 0000000000000..1574385e79f85 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/QueryRetryException.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.cache.query; + +import org.apache.ignite.IgniteException; + +/** + * The exception is thrown if a query must be retried because database schema or topology are changed. + */ +public class QueryRetryException extends IgniteException { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param tableName Table name. + */ + public QueryRetryException(String tableName) { + super("Table was modified concurrently (please retry the query): " + tableName); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java index 4e12b8ca03ecc..8fd0abd6d5b82 100644 --- a/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java +++ b/modules/core/src/main/java/org/apache/ignite/cache/query/SqlFieldsQuery.java @@ -20,6 +20,8 @@ import java.util.List; import java.util.concurrent.TimeUnit; import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.IgniteProperties; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.A; @@ -49,6 +51,10 @@ public class SqlFieldsQuery extends Query> { /** */ private 
static final long serialVersionUID = 0L; + /** */ + private static boolean DFLT_LAZY = IgniteSystemProperties.getBoolean( + IgniteSystemProperties.IGNITE_QUERY_LAZY_DEFAULT, false); + /** SQL Query. */ private String sql; @@ -72,7 +78,7 @@ public class SqlFieldsQuery extends Query> { private boolean replicatedOnly; /** */ - private boolean lazy; + private boolean lazy = DFLT_LAZY; /** Partitions for query */ private int[] parts; diff --git a/modules/core/src/main/java/org/apache/ignite/client/ClientAuthenticationException.java b/modules/core/src/main/java/org/apache/ignite/client/ClientAuthenticationException.java index dc39c7a0ab780..526690a69c8b0 100644 --- a/modules/core/src/main/java/org/apache/ignite/client/ClientAuthenticationException.java +++ b/modules/core/src/main/java/org/apache/ignite/client/ClientAuthenticationException.java @@ -18,28 +18,16 @@ package org.apache.ignite.client; /** - * Indicates Ignite server the client is connected to closed the connection and no longer available. + * Indicates user name or password is invalid. */ public class ClientAuthenticationException extends ClientException { /** Serial version uid. */ private static final long serialVersionUID = 0L; - /** Message. */ - private static final String MSG = "Invalid user name or password"; - /** * Default constructor. */ - public ClientAuthenticationException() { - super(MSG); - } - - /** - * Constructs a new exception with the specified cause. - * - * @param cause the cause. 
- */ - public ClientAuthenticationException(Throwable cause) { - super(MSG, cause); + public ClientAuthenticationException(String msg) { + super(msg); } } diff --git a/modules/core/src/main/java/org/apache/ignite/client/ClientAuthorizationException.java b/modules/core/src/main/java/org/apache/ignite/client/ClientAuthorizationException.java new file mode 100644 index 0000000000000..cacede67bb356 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/client/ClientAuthorizationException.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.client; + +/** + * Indicates user has no permission to perform operation. + */ +public class ClientAuthorizationException extends ClientException { + /** Serial version uid. */ + private static final long serialVersionUID = 0L; + + /** Message. */ + private static final String MSG = "User is not authorized to perform this operation"; + + /** + * Default constructor. + */ + public ClientAuthorizationException() { + super(MSG); + } + + /** + * Constructs a new exception with the specified cause and a detail + * message of (cause==null ? null : cause.toString()). + * + * @param cause the cause. 
+ */ + public ClientAuthorizationException(Throwable cause) { + super(MSG, cause); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/client/ClientCacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/client/ClientCacheConfiguration.java index b51dcaa4e851e..f3a2360875db7 100644 --- a/modules/core/src/main/java/org/apache/ignite/client/ClientCacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/client/ClientCacheConfiguration.java @@ -26,6 +26,7 @@ import org.apache.ignite.cache.PartitionLossPolicy; import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.internal.util.typedef.internal.S; /** Cache configuration. */ @@ -61,10 +62,10 @@ public final class ClientCacheConfiguration implements Serializable { private boolean readFromBackup = CacheConfiguration.DFLT_READ_FROM_BACKUP; /** @serial Rebalance batch size. */ - private int rebalanceBatchSize = CacheConfiguration.DFLT_REBALANCE_BATCH_SIZE; + private int rebalanceBatchSize = IgniteConfiguration.DFLT_REBALANCE_BATCH_SIZE; /** @serial Rebalance batches prefetch count. */ - private long rebalanceBatchesPrefetchCnt = CacheConfiguration.DFLT_REBALANCE_BATCHES_PREFETCH_COUNT; + private long rebalanceBatchesPrefetchCnt = IgniteConfiguration.DFLT_REBALANCE_BATCHES_PREFETCH_COUNT; /** @serial Rebalance delay. */ private long rebalanceDelay = 0; @@ -76,10 +77,10 @@ public final class ClientCacheConfiguration implements Serializable { private int rebalanceOrder = 0; /** @serial Rebalance throttle. */ - private long rebalanceThrottle = CacheConfiguration.DFLT_REBALANCE_THROTTLE; + private long rebalanceThrottle = IgniteConfiguration.DFLT_REBALANCE_THROTTLE; /** @serial @serial Rebalance timeout. 
*/ - private long rebalanceTimeout = CacheConfiguration.DFLT_REBALANCE_TIMEOUT; + private long rebalanceTimeout = IgniteConfiguration.DFLT_REBALANCE_TIMEOUT; /** @serial Write synchronization mode. */ private CacheWriteSynchronizationMode writeSynchronizationMode = CacheWriteSynchronizationMode.PRIMARY_SYNC; diff --git a/modules/core/src/main/java/org/apache/ignite/client/ClientException.java b/modules/core/src/main/java/org/apache/ignite/client/ClientException.java index 05556358fb53c..b0d9f6cc5cfee 100644 --- a/modules/core/src/main/java/org/apache/ignite/client/ClientException.java +++ b/modules/core/src/main/java/org/apache/ignite/client/ClientException.java @@ -20,7 +20,7 @@ /** * Common thin client checked exception. */ -public class ClientException extends Exception { +public class ClientException extends RuntimeException { /** Serial version uid. */ private static final long serialVersionUID = 0L; @@ -28,6 +28,7 @@ public class ClientException extends Exception { * Constructs a new exception with {@code null} as its detail message. */ public ClientException() { + // No-op. } /** diff --git a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterMetrics.java b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterMetrics.java index 74e98b857334b..751e62873d3ff 100644 --- a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterMetrics.java +++ b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterMetrics.java @@ -694,4 +694,11 @@ public interface ClusterMetrics { * @return Total number of nodes. */ public int getTotalNodes(); + + /** + * Gets execution duration for current partition map exchange in milliseconds. + * + * @return Gets execution duration for current partition map exchange in milliseconds. {@code 0} if there is no running PME. 
+ */ + public long getCurrentPmeDuration(); } diff --git a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java index 97b31653a9e51..bfc6c1813a246 100644 --- a/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java +++ b/modules/core/src/main/java/org/apache/ignite/cluster/ClusterNode.java @@ -245,17 +245,11 @@ public interface ClusterNode extends BaselineNode { public boolean isDaemon(); /** - * Tests whether or not this node is connected to cluster as a client. - *

- * Do not confuse client in terms of - * discovery {@link DiscoverySpi#isClientMode()} and client in terms of cache - * {@link IgniteConfiguration#isClientMode()}. Cache clients cannot carry data, - * while topology clients connect to topology in a different way. + * Whether this node is cache client (see {@link IgniteConfiguration#isClientMode()}). + * + * @return {@code True if client}. * - * @return {@code True} if this node is a client node, {@code false} otherwise. * @see IgniteConfiguration#isClientMode() - * @see Ignition#isClientMode() - * @see DiscoverySpi#isClientMode() */ public boolean isClient(); } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/compute/ComputeTaskAdapter.java b/modules/core/src/main/java/org/apache/ignite/compute/ComputeTaskAdapter.java index c5352aacb41ff..fc55ad9f30f8f 100644 --- a/modules/core/src/main/java/org/apache/ignite/compute/ComputeTaskAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/compute/ComputeTaskAdapter.java @@ -99,7 +99,7 @@ public abstract class ComputeTaskAdapter implements ComputeTask { return ComputeJobResultPolicy.FAILOVER; throw new IgniteException("Remote job threw user exception (override or implement ComputeTask.result(..) " + - "method if you would like to have automatic failover for this exception).", e); + "method if you would like to have automatic failover for this exception): " + e.getMessage(), e); } // Wait for all job responses. diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java index 2e35f37077b88..302fcdde04e92 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/CacheConfiguration.java @@ -79,18 +79,33 @@ public class CacheConfiguration extends MutableConfiguration { /** Maximum number of partitions. 
*/ public static final int MAX_PARTITIONS_COUNT = 65000; - /** Default size of rebalance thread pool. */ + /** + * Default size of rebalance thread pool. + * @deprecated Use {@link IgniteConfiguration#DFLT_REBALANCE_THREAD_POOL_SIZE} instead. + */ @Deprecated - public static final int DFLT_REBALANCE_THREAD_POOL_SIZE = 2; + public static final int DFLT_REBALANCE_THREAD_POOL_SIZE = IgniteConfiguration.DFLT_REBALANCE_THREAD_POOL_SIZE; - /** Default rebalance timeout (ms).*/ - public static final long DFLT_REBALANCE_TIMEOUT = 10000; + /** + * Default rebalance timeout (ms). + * @deprecated Use {@link IgniteConfiguration#DFLT_REBALANCE_TIMEOUT} instead. + */ + @Deprecated + public static final long DFLT_REBALANCE_TIMEOUT = IgniteConfiguration.DFLT_REBALANCE_TIMEOUT; - /** Default rebalance batches prefetch count. */ - public static final long DFLT_REBALANCE_BATCHES_PREFETCH_COUNT = 2; + /** + * Default rebalance batches prefetch count. + * @deprecated Use {@link IgniteConfiguration#DFLT_REBALANCE_BATCHES_PREFETCH_COUNT} instead. + */ + @Deprecated + public static final long DFLT_REBALANCE_BATCHES_PREFETCH_COUNT = IgniteConfiguration.DFLT_REBALANCE_BATCHES_PREFETCH_COUNT; - /** Time in milliseconds to wait between rebalance messages to avoid overloading CPU. */ - public static final long DFLT_REBALANCE_THROTTLE = 0; + /** + * Time in milliseconds to wait between rebalance messages to avoid overloading CPU. + * @deprecated Use {@link IgniteConfiguration#DFLT_REBALANCE_THROTTLE} instead. + */ + @Deprecated + public static final long DFLT_REBALANCE_THROTTLE = IgniteConfiguration.DFLT_REBALANCE_THROTTLE; /** Default number of backups. */ public static final int DFLT_BACKUPS = 0; @@ -119,8 +134,12 @@ public class CacheConfiguration extends MutableConfiguration { /** Default rebalance mode for distributed cache. */ public static final CacheRebalanceMode DFLT_REBALANCE_MODE = CacheRebalanceMode.ASYNC; - /** Default rebalance batch size in bytes. 
*/ - public static final int DFLT_REBALANCE_BATCH_SIZE = 512 * 1024; // 512K + /** + * Default rebalance batch size in bytes. + * @deprecated Use {@link IgniteConfiguration#DFLT_REBALANCE_BATCH_SIZE} instead. + */ + @Deprecated + public static final int DFLT_REBALANCE_BATCH_SIZE = IgniteConfiguration.DFLT_REBALANCE_BATCH_SIZE; /** Default value for eager ttl flag. */ public static final boolean DFLT_EAGER_TTL = true; @@ -204,6 +223,7 @@ public class CacheConfiguration extends MutableConfiguration { private int rebalancePoolSize = DFLT_REBALANCE_THREAD_POOL_SIZE; /** Rebalance timeout. */ + @Deprecated private long rebalanceTimeout = DFLT_REBALANCE_TIMEOUT; /** Cache eviction policy. */ @@ -274,9 +294,11 @@ public class CacheConfiguration extends MutableConfiguration { private int rebalanceOrder; /** Rebalance batch size. */ + @Deprecated private int rebalanceBatchSize = DFLT_REBALANCE_BATCH_SIZE; /** Rebalance batches prefetch count. */ + @Deprecated private long rebalanceBatchesPrefetchCnt = DFLT_REBALANCE_BATCHES_PREFETCH_COUNT; /** Maximum number of concurrent asynchronous operations. */ @@ -312,11 +334,12 @@ public class CacheConfiguration extends MutableConfiguration { /** */ private long rebalanceDelay; - /** */ + /** Time in milliseconds to wait between rebalance messages to avoid overloading CPU. */ + @Deprecated private long rebalanceThrottle = DFLT_REBALANCE_THROTTLE; /** */ - private CacheInterceptor interceptor; + private CacheInterceptor interceptor; /** */ private Class[] sqlFuncCls; @@ -1181,7 +1204,9 @@ public CacheConfiguration setRebalanceOrder(int rebalanceOrder) { * {@link #DFLT_REBALANCE_BATCH_SIZE}. * * @return Size in bytes of a single rebalance message. + * @deprecated Use {@link IgniteConfiguration#getRebalanceBatchSize()} instead. */ + @Deprecated public int getRebalanceBatchSize() { return rebalanceBatchSize; } @@ -1191,7 +1216,9 @@ public int getRebalanceBatchSize() { * * @param rebalanceBatchSize Rebalance batch size. 
* @return {@code this} for chaining. + * @deprecated Use {@link IgniteConfiguration#setRebalanceBatchSize(int)} instead. */ + @Deprecated public CacheConfiguration setRebalanceBatchSize(int rebalanceBatchSize) { this.rebalanceBatchSize = rebalanceBatchSize; @@ -1206,7 +1233,9 @@ public CacheConfiguration setRebalanceBatchSize(int rebalanceBatchSize) { * Minimum is 1. * * @return batches count + * @deprecated Use {@link IgniteConfiguration#getRebalanceBatchesPrefetchCount()} instead. */ + @Deprecated public long getRebalanceBatchesPrefetchCount() { return rebalanceBatchesPrefetchCnt; } @@ -1220,7 +1249,9 @@ public long getRebalanceBatchesPrefetchCount() { * * @param rebalanceBatchesCnt batches count. * @return {@code this} for chaining. + * @deprecated Use {@link IgniteConfiguration#setRebalanceBatchesPrefetchCount(long)} instead. */ + @Deprecated public CacheConfiguration setRebalanceBatchesPrefetchCount(long rebalanceBatchesCnt) { this.rebalanceBatchesPrefetchCnt = rebalanceBatchesCnt; @@ -1469,7 +1500,9 @@ public CacheConfiguration setRebalanceThreadPoolSize(int rebalancePoolSize * Default value is {@link #DFLT_REBALANCE_TIMEOUT}. * * @return Rebalance timeout (ms). + * @deprecated Use {@link IgniteConfiguration#getRebalanceTimeout()} instead. */ + @Deprecated public long getRebalanceTimeout() { return rebalanceTimeout; } @@ -1479,7 +1512,9 @@ public long getRebalanceTimeout() { * * @param rebalanceTimeout Rebalance timeout (ms). * @return {@code this} for chaining. + * @deprecated Use {@link IgniteConfiguration#setRebalanceTimeout(long)} instead. */ + @Deprecated public CacheConfiguration setRebalanceTimeout(long rebalanceTimeout) { this.rebalanceTimeout = rebalanceTimeout; @@ -1536,8 +1571,10 @@ public CacheConfiguration setRebalanceDelay(long rebalanceDelay) { * the default is defined by {@link #DFLT_REBALANCE_THROTTLE} constant. 
* * @return Time in milliseconds to wait between rebalance messages to avoid overloading of CPU, - * {@code 0} to disable throttling. + * {@code 0} to disable throttling. + * @deprecated Use {@link IgniteConfiguration#getRebalanceThrottle()} instead. */ + @Deprecated public long getRebalanceThrottle() { return rebalanceThrottle; } @@ -1553,7 +1590,9 @@ public long getRebalanceThrottle() { * @param rebalanceThrottle Time in milliseconds to wait between rebalance messages to avoid overloading of CPU, * {@code 0} to disable throttling. * @return {@code this} for chaining. + * @deprecated Use {@link IgniteConfiguration#setRebalanceThrottle(long)} instead. */ + @Deprecated public CacheConfiguration setRebalanceThrottle(long rebalanceThrottle) { this.rebalanceThrottle = rebalanceThrottle; @@ -1617,9 +1656,8 @@ public CacheConfiguration setMaxQueryIteratorsCount(int maxQryIterCnt) { * * @return Cache interceptor. */ - @SuppressWarnings({"unchecked"}) @Nullable public CacheInterceptor getInterceptor() { - return (CacheInterceptor)interceptor; + return interceptor; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/ClientConnectorConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/ClientConnectorConfiguration.java index ed30db3f3b602..16be1984b721d 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/ClientConnectorConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/ClientConnectorConfiguration.java @@ -45,6 +45,9 @@ public class ClientConnectorConfiguration { /** Default size of thread pool. */ public static final int DFLT_THREAD_POOL_SIZE = IgniteConfiguration.DFLT_PUBLIC_THREAD_CNT; + /** Default handshake timeout. */ + public static final int DFLT_HANDSHAKE_TIMEOUT = 10_000; + /** Default idle timeout. */ public static final int DFLT_IDLE_TIMEOUT = 0; @@ -78,6 +81,9 @@ public class ClientConnectorConfiguration { /** Idle timeout. 
*/ private long idleTimeout = DFLT_IDLE_TIMEOUT; + /** Handshake timeout. */ + private long handshakeTimeout = DFLT_HANDSHAKE_TIMEOUT; + /** JDBC connections enabled flag. */ private boolean jdbcEnabled = true; @@ -123,6 +129,10 @@ public ClientConnectorConfiguration(ClientConnectorConfiguration cfg) { tcpNoDelay = cfg.isTcpNoDelay(); threadPoolSize = cfg.getThreadPoolSize(); idleTimeout = cfg.getIdleTimeout(); + handshakeTimeout = cfg.getHandshakeTimeout(); + jdbcEnabled = cfg.jdbcEnabled; + odbcEnabled = cfg.odbcEnabled; + thinCliEnabled = cfg.thinCliEnabled; sslEnabled = cfg.isSslEnabled(); sslClientAuth = cfg.isSslClientAuth(); useIgniteSslCtxFactory = cfg.isUseIgniteSslContextFactory(); @@ -332,6 +342,34 @@ public ClientConnectorConfiguration setIdleTimeout(long idleTimeout) { return this; } + /** + * Gets handshake timeout for client connections. + * If no successful handshake is performed within this timeout upon successfull establishment of TCP connection, + * the connection is closed. + * Zero or negative means no timeout. + * + * @return Handshake timeout in milliseconds. + */ + public long getHandshakeTimeout() { + return handshakeTimeout; + } + + /** + * Sets handshake timeout for client connections. + * If no successful handshake is performed within this timeout upon successfull establishment of TCP connection, + * the connection is closed. + * Zero or negative means no timeout. + * + * @param handshakeTimeout Idle timeout in milliseconds. + * @see #getHandshakeTimeout() + * @return {@code this} for chaining. + */ + public ClientConnectorConfiguration setHandshakeTimeout(long handshakeTimeout) { + this.handshakeTimeout = handshakeTimeout; + + return this; + } + /** * Gets whether access through JDBC is enabled. *

diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureContext.java b/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureContext.java new file mode 100644 index 0000000000000..a32d38c65be60 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureContext.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.configuration; + +import java.util.List; +import java.util.Map; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.spi.communication.CommunicationSpi; + +/** + * Communication Failure Context. + */ +public interface CommunicationFailureContext { + /** + * @return Current topology snapshot. + */ + public List topologySnapshot(); + + /** + * @param node1 First node. + * @param node2 Second node. + * @return {@code True} if {@link CommunicationSpi} is able to establish connection from first node to second node. + */ + public boolean connectionAvailable(ClusterNode node1, ClusterNode node2); + + /** + * @return Currently started caches. + */ + public Map> startedCaches(); + + /** + * @param cacheName Cache name. 
+ * @return Cache partitions affinity assignment. + */ + public List> cacheAffinity(String cacheName); + + /** + * @param cacheName Cache name. + * @return Cache partitions owners. + */ + public List> cachePartitionOwners(String cacheName); + + /** + * @param node Node to kill. + */ + public void killNode(ClusterNode node); +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureResolver.java b/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureResolver.java new file mode 100644 index 0000000000000..a4d92f33c73f0 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/CommunicationFailureResolver.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.configuration; + +/** + * Communication Failure Resolver. + */ +public interface CommunicationFailureResolver { + /** + * @param ctx Context. 
+ */ + public void resolve(CommunicationFailureContext ctx); +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java index a433760ce4cc9..92754c862911a 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DataStorageConfiguration.java @@ -18,6 +18,7 @@ package org.apache.ignite.configuration; import java.io.Serializable; +import java.util.zip.Deflater; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.internal.processors.cache.persistence.file.AsyncFileIOFactory; import org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory; @@ -152,6 +153,9 @@ public class DataStorageConfiguration implements Serializable { /** Default wal compaction enabled. */ public static final boolean DFLT_WAL_COMPACTION_ENABLED = false; + /** Default wal compaction level. */ + public static final int DFLT_WAL_COMPACTION_LEVEL = Deflater.BEST_SPEED; + /** Size of a memory chunk reserved for system cache initially. */ private long sysRegionInitSize = DFLT_SYS_CACHE_INIT_SIZE; @@ -258,6 +262,18 @@ public class DataStorageConfiguration implements Serializable { */ private boolean walCompactionEnabled = DFLT_WAL_COMPACTION_ENABLED; + /** + * ZIP level to WAL compaction. + * + * @see java.util.zip.ZipOutputStream#setLevel(int) + * @see java.util.zip.Deflater#BEST_SPEED + * @see java.util.zip.Deflater#BEST_COMPRESSION + */ + private int walCompactionLevel = DFLT_WAL_COMPACTION_LEVEL; + + /** Timeout for checkpoint read lock acquisition. */ + private Long checkpointReadLockTimeout; + /** * Initial size of a data region reserved for system cache. 
* @@ -388,6 +404,7 @@ public DataRegionConfiguration getDefaultDataRegionConfiguration() { /** * Overrides configuration of default data region which is created automatically. + * * @param dfltDataRegConf Default data region configuration. */ public DataStorageConfiguration setDefaultDataRegionConfiguration(DataRegionConfiguration dfltDataRegConf) { @@ -909,6 +926,44 @@ public DataStorageConfiguration setWalCompactionEnabled(boolean walCompactionEna return this; } + /** + * @return ZIP level to WAL compaction. + */ + public int getWalCompactionLevel() { + return walCompactionLevel; + } + + /** + * @param walCompactionLevel New ZIP level to WAL compaction. + */ + public void setWalCompactionLevel(int walCompactionLevel) { + this.walCompactionLevel = walCompactionLevel; + } + + /** + * Returns timeout for checkpoint read lock acquisition. + * + * @see #setCheckpointReadLockTimeout(long) + * @return Returns timeout for checkpoint read lock acquisition in milliseconds. + */ + public Long getCheckpointReadLockTimeout() { + return checkpointReadLockTimeout; + } + + /** + * Sets timeout for checkpoint read lock acquisition. + *

+ * When any thread cannot acquire checkpoint read lock in this time, then critical failure handler is being called. + * + * @param checkpointReadLockTimeout Timeout for checkpoint read lock acquisition in milliseconds. + * @return {@code this} for chaining. + */ + public DataStorageConfiguration setCheckpointReadLockTimeout(long checkpointReadLockTimeout) { + this.checkpointReadLockTimeout = checkpointReadLockTimeout; + + return this; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(DataStorageConfiguration.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java new file mode 100644 index 0000000000000..46c79cb41cb25 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/configuration/DefaultCommunicationFailureResolver.java @@ -0,0 +1,293 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.configuration; + +import java.util.BitSet; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.cluster.graph.BitSetIterator; +import org.apache.ignite.internal.cluster.graph.ClusterGraph; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.resources.LoggerResource; +import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; + +/** + * Default Communication Failure Resolver. + */ +public class DefaultCommunicationFailureResolver implements CommunicationFailureResolver { + /** */ + @LoggerResource + private IgniteLogger log; + + /** {@inheritDoc} */ + @Override public void resolve(CommunicationFailureContext ctx) { + ClusterPart largestCluster = findLargestConnectedCluster(ctx); + + if (largestCluster == null) + return; + + if (log.isInfoEnabled()) + log.info("Communication problem resolver found fully connected independent cluster [" + + "serverNodesCnt=" + largestCluster.srvNodesCnt + ", " + + "clientNodesCnt=" + largestCluster.connectedClients.size() + ", " + + "totalAliveNodes=" + ctx.topologySnapshot().size() + ", " + + "serverNodesIds=" + clusterNodeIds(largestCluster.srvNodesSet, ctx.topologySnapshot(), 1000) + "]"); + + keepCluster(ctx, largestCluster); + } + + /** + * Finds largest part of the cluster where each node is able to connect to each other. + * + * @param ctx Communication failure context. + * @return Largest part of the cluster nodes to keep. 
+ */ + @Nullable private ClusterPart findLargestConnectedCluster(CommunicationFailureContext ctx) { + List srvNodes = ctx.topologySnapshot() + .stream() + .filter(node -> !node.isClient()) + .collect(Collectors.toList()); + + // Exclude client nodes from analysis. + ClusterGraph graph = new ClusterGraph(ctx, ClusterNode::isClient); + + List components = graph.findConnectedComponents(); + + if (components.isEmpty()) { + U.warn(log, "Unable to find at least one alive server node in the cluster " + ctx); + + return null; + } + + if (components.size() == 1) { + BitSet nodesSet = components.get(0); + int nodeCnt = nodesSet.cardinality(); + + boolean fullyConnected = graph.checkFullyConnected(nodesSet); + + if (fullyConnected && nodeCnt == srvNodes.size()) { + Set clients = findConnectedClients(ctx, nodesSet); + + if (clients.size() + nodeCnt == ctx.topologySnapshot().size()) { + U.warn(log, "All alive nodes are fully connected, this should be resolved automatically."); + + return null; + } + else + return new ClusterPart(nodesSet, clients); + } + + if (log.isInfoEnabled()) + log.info("Communication problem resolver detected partial lost for some connections inside cluster. " + + "Will keep largest set of healthy fully-connected nodes. Other nodes will be killed forcibly."); + + BitSet fullyConnectedPart = graph.findLargestFullyConnectedComponent(nodesSet); + Set connectedClients = findConnectedClients(ctx, fullyConnectedPart); + + return new ClusterPart(fullyConnectedPart, connectedClients); + } + + // If cluster has splitted on several parts and there are at least 2 parts which aren't single node + // It means that split brain has happened. + boolean isSplitBrain = components.size() > 1 && + components.stream().filter(cmp -> cmp.size() > 1).count() > 1; + + if (isSplitBrain) + U.warn(log, "Communication problem resolver detected split brain. " + + "Cluster has splitted on " + components.size() + " independent parts. 
" + + "Will keep only one largest fully-connected part. " + + "Other nodes will be killed forcibly."); + else + U.warn(log, "Communication problem resolver detected full lost for some connections inside cluster. " + + "Problem nodes will be found and killed forcibly."); + + // For each part of splitted cluster extract largest fully-connected component. + ClusterPart largestCluster = null; + for (int i = 0; i < components.size(); i++) { + BitSet clusterPart = components.get(i); + + BitSet fullyConnectedPart = graph.findLargestFullyConnectedComponent(clusterPart); + Set connectedClients = findConnectedClients(ctx, fullyConnectedPart); + + ClusterPart curr = new ClusterPart(fullyConnectedPart, connectedClients); + + if (largestCluster == null || curr.compareTo(largestCluster) > 0) + largestCluster = curr; + } + + assert largestCluster != null + : "Unable to find at least one alive independent cluster."; + + return largestCluster; + } + + /** + * Keeps server cluster nodes presented in given {@code srvNodesSet}. + * Client nodes which have connections to presented {@code srvNodesSet} will be also keeped. + * Other nodes will be killed forcibly. + * + * @param ctx Communication failure context. + * @param clusterPart Set of nodes need to keep in the cluster. + */ + private void keepCluster(CommunicationFailureContext ctx, ClusterPart clusterPart) { + List allNodes = ctx.topologySnapshot(); + + // Kill server nodes. + for (int idx = 0; idx < allNodes.size(); idx++) { + ClusterNode node = allNodes.get(idx); + + // Client nodes will be processed separately. + if (node.isClient()) + continue; + + if (!clusterPart.srvNodesSet.get(idx)) + ctx.killNode(node); + } + + // Kill client nodes unable to connect to the presented part of cluster. 
+ for (int idx = 0; idx < allNodes.size(); idx++) { + ClusterNode node = allNodes.get(idx); + + if (node.isClient() && !clusterPart.connectedClients.contains(node)) + ctx.killNode(node); + } + } + + /** + * Finds set of the client nodes which are able to connect to given set of server nodes {@code srvNodesSet}. + * + * @param ctx Communication failure context. + * @param srvNodesSet Server nodes set. + * @return Set of client nodes. + */ + private Set findConnectedClients(CommunicationFailureContext ctx, BitSet srvNodesSet) { + Set connectedClients = new HashSet<>(); + + List allNodes = ctx.topologySnapshot(); + + for (ClusterNode node : allNodes) { + if (!node.isClient()) + continue; + + boolean hasConnections = true; + + Iterator it = new BitSetIterator(srvNodesSet); + while (it.hasNext()) { + int srvNodeIdx = it.next(); + ClusterNode srvNode = allNodes.get(srvNodeIdx); + + if (!ctx.connectionAvailable(node, srvNode) || !ctx.connectionAvailable(srvNode, node)) { + hasConnections = false; + + break; + } + } + + if (hasConnections) + connectedClients.add(node); + } + + return connectedClients; + } + + /** + * Class representing part of cluster. + */ + private static class ClusterPart implements Comparable { + /** Server nodes count. */ + int srvNodesCnt; + + /** Server nodes set. */ + BitSet srvNodesSet; + + /** Set of client nodes are able to connect to presented part of server nodes. */ + Set connectedClients; + + /** + * Constructor. + * + * @param srvNodesSet Server nodes set. + * @param connectedClients Set of client nodes. 
+ */ + public ClusterPart(BitSet srvNodesSet, Set connectedClients) { + this.srvNodesSet = srvNodesSet; + this.srvNodesCnt = srvNodesSet.cardinality(); + this.connectedClients = connectedClients; + } + + /** {@inheritDoc} */ + @Override public int compareTo(@NotNull ClusterPart o) { + int srvNodesCmp = Integer.compare(srvNodesCnt, o.srvNodesCnt); + + if (srvNodesCmp != 0) + return srvNodesCmp; + + return Integer.compare(connectedClients.size(), o.connectedClients.size()); + } + } + + /** + * @param cluster Cluster nodes mask. + * @param nodes Nodes. + * @param limit IDs limit. + * @return Cluster node IDs string. + */ + private static String clusterNodeIds(BitSet cluster, List nodes, int limit) { + int startIdx = 0; + + StringBuilder builder = new StringBuilder(); + + int cnt = 0; + + for (;;) { + int idx = cluster.nextSetBit(startIdx); + + if (idx == -1) + break; + + startIdx = idx + 1; + + if (builder.length() == 0) + builder.append('['); + else + builder.append(", "); + + builder.append(nodes.get(idx).id()); + + if (cnt++ > limit) + builder.append(", ..."); + } + + builder.append(']'); + + return builder.toString(); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(DefaultCommunicationFailureResolver.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java index add388045d1e4..ef5930cb16d97 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/IgniteConfiguration.java @@ -21,6 +21,7 @@ import java.lang.management.ManagementFactory; import java.util.Map; import java.util.UUID; +import java.util.zip.Deflater; import javax.cache.configuration.Factory; import javax.cache.event.CacheEntryListener; import javax.cache.expiry.ExpiryPolicy; @@ -118,6 +119,9 @@ public class 
IgniteConfiguration { /** Default maximum timeout to wait for network responses in milliseconds (value is {@code 5,000ms}). */ public static final long DFLT_NETWORK_TIMEOUT = 5000; + /** Default compression level for network messages (value is Deflater.BEST_SPEED. */ + public static final int DFLT_NETWORK_COMPRESSION = Deflater.BEST_SPEED; + /** Default interval between message send retries. */ public static final long DFLT_SEND_RETRY_DELAY = 1000; @@ -151,6 +155,18 @@ public class IgniteConfiguration { /** Default limit of threads used for rebalance. */ public static final int DFLT_REBALANCE_THREAD_POOL_SIZE = 1; + /** Default rebalance message timeout in milliseconds (value is {@code 10000}). */ + public static final long DFLT_REBALANCE_TIMEOUT = 10000; + + /** Default rebalance batches prefetch count (value is {@code 2}). */ + public static final long DFLT_REBALANCE_BATCHES_PREFETCH_COUNT = 2; + + /** Time to wait between rebalance messages in milliseconds to avoid overloading CPU (value is {@code 0}). */ + public static final long DFLT_REBALANCE_THROTTLE = 0; + + /** Default rebalance batch size in bytes (value is {@code 512Kb}). */ + public static final int DFLT_REBALANCE_BATCH_SIZE = 512 * 1024; // 512K + /** Default size of system thread pool. */ public static final int DFLT_SYSTEM_CORE_THREAD_CNT = DFLT_PUBLIC_THREAD_CNT; @@ -207,6 +223,9 @@ public class IgniteConfiguration { @SuppressWarnings("UnnecessaryBoxing") public static final Long DFLT_FAILURE_DETECTION_TIMEOUT = new Long(10_000); + /** Default system worker blocked timeout in millis. */ + public static final Long DFLT_SYS_WORKER_BLOCKED_TIMEOUT = 2 * 60 * 1000L; + /** Default failure detection timeout for client nodes in millis. */ @SuppressWarnings("UnnecessaryBoxing") public static final Long DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT = new Long(30_000); @@ -295,6 +314,9 @@ public class IgniteConfiguration { /** Maximum network requests timeout. 
*/ private long netTimeout = DFLT_NETWORK_TIMEOUT; + /** Compression level for network binary messages. */ + private int netCompressionLevel = DFLT_NETWORK_COMPRESSION; + /** Interval between message send retries. */ private long sndRetryDelay = DFLT_SEND_RETRY_DELAY; @@ -370,6 +392,18 @@ public class IgniteConfiguration { /** Rebalance thread pool size. */ private int rebalanceThreadPoolSize = DFLT_REBALANCE_THREAD_POOL_SIZE; + /** Rrebalance messages timeout in milliseconds. */ + private long rebalanceTimeout = DFLT_REBALANCE_TIMEOUT; + + /** Rebalance batches prefetch count. */ + private long rebalanceBatchesPrefetchCnt = DFLT_REBALANCE_BATCHES_PREFETCH_COUNT; + + /** Time to wait between rebalance messages in milliseconds. */ + private long rebalanceThrottle = DFLT_REBALANCE_THROTTLE; + + /** Rebalance batch size in bytes. */ + private int rebalanceBatchSize = DFLT_REBALANCE_BATCH_SIZE; + /** Transactions configuration. */ private TransactionConfiguration txCfg = new TransactionConfiguration(); @@ -401,6 +435,9 @@ public class IgniteConfiguration { /** Failure detection timeout. */ private Long failureDetectionTimeout = DFLT_FAILURE_DETECTION_TIMEOUT; + /** Timeout for blocked system workers detection. */ + private Long sysWorkerBlockedTimeout = DFLT_SYS_WORKER_BLOCKED_TIMEOUT; + /** Failure detection timeout for client nodes. */ private Long clientFailureDetectionTimeout = DFLT_CLIENT_FAILURE_DETECTION_TIMEOUT; @@ -493,6 +530,12 @@ public class IgniteConfiguration { /** Failure handler. */ private FailureHandler failureHnd; + /** Communication failure resolver */ + private CommunicationFailureResolver commFailureRslvr; + + /** SQL schemas to be created on node start. */ + private String[] sqlSchemas; + /** * Creates valid grid configuration with all default values. 
*/ @@ -520,6 +563,8 @@ public IgniteConfiguration(IgniteConfiguration cfg) { loadBalancingSpi = cfg.getLoadBalancingSpi(); indexingSpi = cfg.getIndexingSpi(); + commFailureRslvr = cfg.getCommunicationFailureResolver(); + /* * Order alphabetically for maintenance purposes. */ @@ -582,6 +627,10 @@ public IgniteConfiguration(IgniteConfiguration cfg) { pubPoolSize = cfg.getPublicThreadPoolSize(); qryPoolSize = cfg.getQueryThreadPoolSize(); rebalanceThreadPoolSize = cfg.getRebalanceThreadPoolSize(); + rebalanceTimeout = cfg.getRebalanceTimeout(); + rebalanceBatchesPrefetchCnt = cfg.getRebalanceBatchesPrefetchCount(); + rebalanceThrottle = cfg.getRebalanceThrottle(); + rebalanceBatchSize = cfg.getRebalanceBatchSize(); segChkFreq = cfg.getSegmentCheckFrequency(); segPlc = cfg.getSegmentationPolicy(); segResolveAttempts = cfg.getSegmentationResolveAttempts(); @@ -589,12 +638,14 @@ public IgniteConfiguration(IgniteConfiguration cfg) { sndRetryCnt = cfg.getNetworkSendRetryCount(); sndRetryDelay = cfg.getNetworkSendRetryDelay(); sqlConnCfg = cfg.getSqlConnectorConfiguration(); + sqlSchemas = cfg.getSqlSchemas(); sslCtxFactory = cfg.getSslContextFactory(); storeSesLsnrs = cfg.getCacheStoreSessionListenerFactories(); stripedPoolSize = cfg.getStripedPoolSize(); svcCfgs = cfg.getServiceConfiguration(); svcPoolSize = cfg.getServiceThreadPoolSize(); sysPoolSize = cfg.getSystemThreadPoolSize(); + sysWorkerBlockedTimeout = cfg.getSystemWorkerBlockedTimeout(); timeSrvPortBase = cfg.getTimeServerPortBase(); timeSrvPortRange = cfg.getTimeServerPortRange(); txCfg = cfg.getTransactionConfiguration(); @@ -606,6 +657,23 @@ public IgniteConfiguration(IgniteConfiguration cfg) { authEnabled = cfg.isAuthenticationEnabled(); } + /** + * @return Communication failure resovler. + */ + public CommunicationFailureResolver getCommunicationFailureResolver() { + return commFailureRslvr; + } + + /** + * @param commFailureRslvr Communication failure resovler. + * @return {@code this} instance. 
+ */ + public IgniteConfiguration setCommunicationFailureResolver(CommunicationFailureResolver commFailureRslvr) { + this.commFailureRslvr = commFailureRslvr; + + return this; + } + /** * Gets optional grid name. Returns {@code null} if non-default grid name was not * provided. @@ -1429,6 +1497,29 @@ public IgniteConfiguration setNetworkTimeout(long netTimeout) { return this; } + /** + * Compression level of internal network messages. + *

+ * If not provided, then default value + * Deflater.BEST_SPEED is used. + * + * @return Network messages default compression level. + */ + public int getNetworkCompressionLevel() { + return netCompressionLevel; + } + + /** + * Compression level for internal network messages. + *

+ * If not provided, then default value + * Deflater.BEST_SPEED is used. + * + */ + public void setNetworkCompressionLevel(int netCompressionLevel) { + this.netCompressionLevel = netCompressionLevel; + } + /** * Interval in milliseconds between message send retries. *

@@ -1511,6 +1602,133 @@ public IgniteConfiguration setRebalanceThreadPoolSize(int rebalanceThreadPoolSiz return this; } + /** + * Rebalance timeout for supply and demand messages in milliseconds. The {@code rebalanceTimeout} parameter + * specifies how long a message will stay in a receiving queue, waiting for other ordered messages that are + * ordered ahead of it to arrive will be processed. If timeout expires, then all messages that have not arrived + * before this message will be skipped. If an expired supply (demand) message actually does arrive, it will be + * ignored. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_TIMEOUT}, if {@code 0} than the + * {@link IgniteConfiguration#getNetworkTimeout()} will be used instead. + * + * @return Rebalance message timeout in milliseconds. + */ + public long getRebalanceTimeout() { + return rebalanceTimeout; + } + + /** + * Rebalance timeout for supply and demand messages in milliseconds. The {@code rebalanceTimeout} parameter + * specifies how long a message will stay in a receiving queue, waiting for other ordered messages that are + * ordered ahead of it to arrive will be processed. If timeout expires, then all messages that have not arrived + * before this message will be skipped. If an expired supply (demand) message actually does arrive, it will be + * ignored. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_TIMEOUT}, if {@code 0} than the + * {@link IgniteConfiguration#getNetworkTimeout()} will be used instead. + * + * @param rebalanceTimeout Rebalance message timeout in milliseconds. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setRebalanceTimeout(long rebalanceTimeout) { + this.rebalanceTimeout = rebalanceTimeout; + + return this; + } + + /** + * The number of batches generated by supply node at rebalancing procedure start. To gain better rebalancing + * performance supplier node can provide more than one batch at rebalancing start and provide one new to each + * next demand request. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_BATCHES_PREFETCH_COUNT}, minimum value is {@code 1}. + * + * @return The number of batches prefetch count. + */ + public long getRebalanceBatchesPrefetchCount() { + return rebalanceBatchesPrefetchCnt; + } + + /** + * The number of batches generated by supply node at rebalancing procedure start. To gain better rebalancing + * performance supplier node can provide more than one batch at rebalancing start and provide one new to each + * next demand request. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_BATCHES_PREFETCH_COUNT}, minimum value is {@code 1}. + * + * @param rebalanceBatchesCnt The number of batches prefetch count. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setRebalanceBatchesPrefetchCount(long rebalanceBatchesCnt) { + this.rebalanceBatchesPrefetchCnt = rebalanceBatchesCnt; + + return this; + } + + /** + * Time in milliseconds to wait between rebalance messages to avoid overloading of CPU or network. + * When rebalancing large data sets, the CPU or network can get over-consumed with rebalancing messages, + * which consecutively may slow down the application performance. This parameter helps tune + * the amount of time to wait between rebalance messages to make sure that rebalancing process + * does not have any negative performance impact. Note that application will continue to work + * properly while rebalancing is still in progress. + *

+ * Value of {@code 0} means that throttling is disabled. By default throttling is disabled - + * the default is defined by {@link IgniteConfiguration#DFLT_REBALANCE_THROTTLE} constant. + * + * @return Time in milliseconds to wait between rebalance messages, {@code 0} to disable throttling. + */ + public long getRebalanceThrottle() { + return rebalanceThrottle; + } + + /** + * Time in milliseconds to wait between rebalance messages to avoid overloading of CPU or network. When rebalancing + * large data sets, the CPU or network can get over-consumed with rebalancing messages, which consecutively may slow + * down the application performance. This parameter helps tune the amount of time to wait between rebalance messages + * to make sure that rebalancing process does not have any negative performance impact. Note that application will + * continue to work properly while rebalancing is still in progress. + *

+ * Value of {@code 0} means that throttling is disabled. By default throttling is disabled - + * the default is defined by {@link IgniteConfiguration#DFLT_REBALANCE_THROTTLE} constant. + * + * @param rebalanceThrottle Time in milliseconds to wait between rebalance messages, {@code 0} to disable throttling. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setRebalanceThrottle(long rebalanceThrottle) { + this.rebalanceThrottle = rebalanceThrottle; + + return this; + } + + /** + * The supply message size in bytes to be loaded within a single rebalance batch. The data balancing algorithm + * splits all the cache data entries on supply node into multiple batches prior to sending them to the demand node. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_BATCH_SIZE}. + * + * @return Rebalance message size in bytes. + */ + public int getRebalanceBatchSize() { + return rebalanceBatchSize; + } + + /** + * The supply message size in bytes to be loaded within a single rebalance batch. The data balancing algorithm + * splits all the cache data entries on supply node into multiple batches prior to sending them to the demand node. + *

+ * Default value is defined by {@link IgniteConfiguration#DFLT_REBALANCE_BATCH_SIZE}. + * + * @param rebalanceBatchSize Rebalance message size in bytes. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setRebalanceBatchSize(int rebalanceBatchSize) { + this.rebalanceBatchSize = rebalanceBatchSize; + + return this; + } + /** * Returns a collection of life-cycle beans. These beans will be automatically * notified of grid life-cycle events. Use life-cycle beans whenever you @@ -1936,6 +2154,31 @@ public IgniteConfiguration setFailureDetectionTimeout(long failureDetectionTimeo return this; } + /** + * Returns maximum inactivity period for system worker. When this value is exceeded, worker is considered blocked + * with consequent critical failure handler invocation. + * + * @see #setSystemWorkerBlockedTimeout(long) + * @return Maximum inactivity period for system worker in milliseconds. + */ + public Long getSystemWorkerBlockedTimeout() { + return sysWorkerBlockedTimeout; + } + + /** + * Sets maximum inactivity period for system worker. When this value is exceeded, worker is considered blocked + * with consequent critical failure handler invocation. + * + * @see #setFailureHandler(FailureHandler) + * @param sysWorkerBlockedTimeout Maximum inactivity period for system worker in milliseconds. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setSystemWorkerBlockedTimeout(long sysWorkerBlockedTimeout) { + this.sysWorkerBlockedTimeout = sysWorkerBlockedTimeout; + + return this; + } + /** * Should return fully configured load balancing SPI implementation. If not provided, * {@link RoundRobinLoadBalancingSpi} will be used. @@ -2979,6 +3222,35 @@ public IgniteConfiguration setAuthenticationEnabled(boolean authEnabled) { return this; } + /** + * Gets SQL schemas to be created on node startup. + *

+ * See {@link #setSqlSchemas(String...)} for more information. + * + * @return SQL schemas to be created on node startup. + */ + public String[] getSqlSchemas() { + return sqlSchemas; + } + + /** + * Sets SQL schemas to be created on node startup. Schemas are created on local node only and are not propagated + * to other cluster nodes. Created schemas cannot be dropped. + *

+ * By default schema names are case-insensitive, i.e. {@code my_schema} and {@code My_Schema} represents the same + * object. Use quotes to enforce case sensitivity (e.g. {@code "My_Schema"}). + *

+ * Property is ignored if {@code ignite-indexing} module is not in classpath. + * + * @param sqlSchemas SQL schemas to be created on node startup. + * @return {@code this} for chaining. + */ + public IgniteConfiguration setSqlSchemas(String... sqlSchemas) { + this.sqlSchemas = sqlSchemas; + + return this; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(IgniteConfiguration.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java b/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java index 0063afc158fd1..087332c83b810 100644 --- a/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java +++ b/modules/core/src/main/java/org/apache/ignite/configuration/TransactionConfiguration.java @@ -19,6 +19,8 @@ import java.io.Serializable; import javax.cache.configuration.Factory; +import org.apache.ignite.internal.util.TransientSerializable; +import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.transactions.Transaction; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; @@ -26,7 +28,11 @@ /** * Transactions configuration. */ +@TransientSerializable(methodName = "transientSerializableFields") public class TransactionConfiguration implements Serializable { + /** */ + private static final IgniteProductVersion TX_PME_TIMEOUT_SINCE = IgniteProductVersion.fromString("2.5.1"); + /** */ private static final long serialVersionUID = 0L; @@ -42,6 +48,9 @@ public class TransactionConfiguration implements Serializable { /** Default transaction timeout. */ public static final long DFLT_TRANSACTION_TIMEOUT = 0; + /** Transaction timeout on partition map synchronization. */ + public static final long TX_TIMEOUT_ON_PARTITION_MAP_EXCHANGE = 0; + /** Default size of pessimistic transactions log. 
*/ public static final int DFLT_PESSIMISTIC_TX_LOG_LINGER = 10_000; @@ -57,6 +66,9 @@ public class TransactionConfiguration implements Serializable { /** Default transaction timeout. */ private long dfltTxTimeout = DFLT_TRANSACTION_TIMEOUT; + /** Transaction timeout on partition map exchange. */ + private volatile long txTimeoutOnPartitionMapExchange = TX_TIMEOUT_ON_PARTITION_MAP_EXCHANGE; + /** Pessimistic tx log size. */ private int pessimisticTxLogSize; @@ -89,6 +101,7 @@ public TransactionConfiguration(TransactionConfiguration cfg) { dfltConcurrency = cfg.getDefaultTxConcurrency(); dfltIsolation = cfg.getDefaultTxIsolation(); dfltTxTimeout = cfg.getDefaultTxTimeout(); + txTimeoutOnPartitionMapExchange = cfg.getTxTimeoutOnPartitionMapExchange(); pessimisticTxLogLinger = cfg.getPessimisticTxLogLinger(); pessimisticTxLogSize = cfg.getPessimisticTxLogSize(); txSerEnabled = cfg.isTxSerializableEnabled(); @@ -191,6 +204,40 @@ public TransactionConfiguration setDefaultTxTimeout(long dfltTxTimeout) { return this; } + /** + * Some Ignite operations provoke partition map exchange process within Ignite to ensure the partitions distribution + * state is synchronized cluster-wide. Topology update events and a start of a new distributed cache are examples + * of those operations. + *

+ * When the partition map exchange starts, Ignite acquires a global lock at a particular stage. The lock can't be + * obtained until pending transactions are running in parallel. If there is a transaction that runs for a while, + * then it will prevent the partition map exchange process from the start freezing some operations such as a new + * node join process. + *

+ * This property allows to rollback such long transactions to let Ignite acquire the lock faster and initiate the + * partition map exchange process. The timeout is enforced only at the time of the partition map exchange process. + *

+ * If not set, default value is {@link #TX_TIMEOUT_ON_PARTITION_MAP_EXCHANGE} which means transactions will never be + * rolled back on partition map exchange. + * + * @return Transaction timeout for partition map synchronization in milliseconds. + */ + public long getTxTimeoutOnPartitionMapExchange() { + return txTimeoutOnPartitionMapExchange; + } + + /** + * Sets the transaction timeout that will be enforced if the partition map exchange process starts. + * + * @param txTimeoutOnPartitionMapExchange Transaction timeout value in milliseconds. + * @return {@code this} for chaining. + */ + public TransactionConfiguration setTxTimeoutOnPartitionMapExchange(long txTimeoutOnPartitionMapExchange) { + this.txTimeoutOnPartitionMapExchange = txTimeoutOnPartitionMapExchange; + + return this; + } + /** * Gets size of pessimistic transactions log stored on node in order to recover transaction commit if originating * node has left grid before it has sent all messages to transaction nodes. @@ -340,4 +387,18 @@ public TransactionConfiguration setUseJtaSynchronization(boolean useJtaSync) { return this; } + + /** + * Excludes incompatible fields from serialization/deserialization process. + * + * @param ver Sender/Receiver node version. + * @return Array of excluded from serialization/deserialization fields. 
+ */ + @SuppressWarnings("unused") + private static String[] transientSerializableFields(IgniteProductVersion ver) { + if (TX_PME_TIMEOUT_SINCE.compareToIgnoreTimestamp(ver) >= 0) + return new String[] {"txTimeoutOnPartitionMapExchange"}; + + return null; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java b/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java index 5aa9d0663b85d..9a437f7a369dd 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/CacheEvent.java @@ -140,6 +140,10 @@ public class CacheEvent extends EventAdapter { @GridToStringInclude private String taskName; + /** Transaction label. */ + @GridToStringInclude + private String txLbl; + /** * Constructs cache event. * @@ -152,6 +156,7 @@ public class CacheEvent extends EventAdapter { * @param near Flag indicating whether event happened on {@code near} or {@code partitioned} cache. * @param key Cache key. * @param xid Transaction ID. + * @param txLbl Transaction label. * @param lockId Lock ID. * @param newVal New value. * @param hasNewVal Flag indicating whether new value is present in case if we @@ -163,7 +168,7 @@ public class CacheEvent extends EventAdapter { * @param cloClsName Closure class name. 
*/ public CacheEvent(String cacheName, ClusterNode node, @Nullable ClusterNode evtNode, String msg, int type, int part, - boolean near, Object key, IgniteUuid xid, Object lockId, Object newVal, boolean hasNewVal, + boolean near, Object key, IgniteUuid xid, String txLbl, Object lockId, Object newVal, boolean hasNewVal, Object oldVal, boolean hasOldVal, UUID subjId, String cloClsName, String taskName) { super(node, msg, type); this.cacheName = cacheName; @@ -172,6 +177,7 @@ public CacheEvent(String cacheName, ClusterNode node, @Nullable ClusterNode evtN this.near = near; this.key = key; this.xid = xid; + this.txLbl = txLbl; this.lockId = lockId; this.newVal = newVal; this.hasNewVal = hasNewVal; @@ -229,7 +235,7 @@ public K key() { } /** - * ID of surrounding cache cache transaction or null if there is + * ID of surrounding cache transaction or null if there is * no surrounding transaction. * * @return ID of surrounding cache transaction. @@ -320,6 +326,16 @@ public String taskName() { return taskName; } + /** + * Label of surrounding cache transaction or null if there either is + * no surrounding transaction or label was not set. + * + * @return Label of surrounding cache transaction. 
+ */ + public String txLabel() { + return txLbl; + } + /** {@inheritDoc} */ @Override public String shortDisplay() { return name() + ": near=" + near + ", key=" + key + ", hasNewVal=" + hasNewVal + ", hasOldVal=" + hasOldVal + diff --git a/modules/core/src/main/java/org/apache/ignite/events/CacheRebalancingEvent.java b/modules/core/src/main/java/org/apache/ignite/events/CacheRebalancingEvent.java index 5ff424b123a57..22f8d46bc1f94 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/CacheRebalancingEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/CacheRebalancingEvent.java @@ -62,6 +62,8 @@ * @see EventType#EVT_CACHE_REBALANCE_STARTED * @see EventType#EVT_CACHE_REBALANCE_STOPPED * @see EventType#EVT_CACHE_REBALANCE_PART_DATA_LOST + * @see EventType#EVT_CACHE_REBALANCE_PART_SUPPLIED + * @see EventType#EVT_CACHE_REBALANCE_PART_MISSED */ public class CacheRebalancingEvent extends EventAdapter { /** */ @@ -183,4 +185,4 @@ public long discoveryTimestamp() { "type", name(), "tstamp", timestamp()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/events/DiscoveryEvent.java b/modules/core/src/main/java/org/apache/ignite/events/DiscoveryEvent.java index 09f23bca47bfd..f34f786ac56eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/DiscoveryEvent.java +++ b/modules/core/src/main/java/org/apache/ignite/events/DiscoveryEvent.java @@ -22,6 +22,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; /** * Grid discovery event. @@ -75,6 +76,9 @@ public class DiscoveryEvent extends EventAdapter { /** Collection of nodes corresponding to topology version. */ private Collection topSnapshot; + /** Template to generate {@link #message()} lazily. Will be joined with {@link #eventNode()} converted to string. 
*/ + private volatile String msgTemplate; + /** {@inheritDoc} */ @Override public String shortDisplay() { return name() + ": id8=" + U.id8(evtNode.id()) + ", ip=" + F.first(evtNode.addresses()); @@ -154,6 +158,39 @@ public void topologySnapshot(long topVer, Collection topSnapshot) { this.topSnapshot = topSnapshot; } + /** + * Template to generate {@link #message()} lazily. Will be joined with {@link #eventNode()} converted to string. + * + * @param msgTemplate Template. + */ + public void messageTemplate(String msgTemplate) { + this.msgTemplate = msgTemplate; + } + + /** {@inheritDoc} */ + @Nullable @Override public String message() { + String msg = super.message(); + + if (msg != null) + return msg; + + if (msgTemplate == null) + return null; + + synchronized (this) { + msg = super.message(); + + if (msg != null) + return msg; + + msg = msgTemplate + eventNode(); + + message(msg); + } + + return msg; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(DiscoveryEvent.class, this, diff --git a/modules/core/src/main/java/org/apache/ignite/events/EventType.java b/modules/core/src/main/java/org/apache/ignite/events/EventType.java index b0b410a49f358..44379abce29bc 100644 --- a/modules/core/src/main/java/org/apache/ignite/events/EventType.java +++ b/modules/core/src/main/java/org/apache/ignite/events/EventType.java @@ -244,6 +244,16 @@ public interface EventType { */ public static final int EVT_TASK_REDUCED = 25; + /** + * Built-in event type: Visor or Web Console management task started. + *

+ * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see TaskEvent + */ + public static final int EVT_MANAGEMENT_TASK_STARTED = 26; + /** * Built-in event type: non-task class deployed. *

@@ -574,6 +584,26 @@ public interface EventType { */ public static final int EVT_CACHE_REBALANCE_PART_DATA_LOST = 86; + /** + * Built-in event type: cache partition was fully sent to remote node. + *

+ * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see CacheRebalancingEvent + */ + public static final int EVT_CACHE_REBALANCE_PART_SUPPLIED = 87; + + /** + * Built-in event type: cache partition was not sent to remote node. + *

+ * NOTE: all types in range from 1 to 1000 are reserved for + * internal Ignite events and should not be used by user-defined events. + * + * @see CacheRebalancingEvent + */ + public static final int EVT_CACHE_REBALANCE_PART_MISSED = 88; + /** * Built-in event type: query executed. *

@@ -931,7 +961,9 @@ public interface EventType { EVT_CACHE_REBALANCE_PART_UNLOADED, EVT_CACHE_REBALANCE_OBJECT_LOADED, EVT_CACHE_REBALANCE_OBJECT_UNLOADED, - EVT_CACHE_REBALANCE_PART_DATA_LOST + EVT_CACHE_REBALANCE_PART_DATA_LOST, + EVT_CACHE_REBALANCE_PART_SUPPLIED, + EVT_CACHE_REBALANCE_PART_MISSED }; /** diff --git a/modules/core/src/main/java/org/apache/ignite/failure/AbstractFailureHandler.java b/modules/core/src/main/java/org/apache/ignite/failure/AbstractFailureHandler.java new file mode 100644 index 0000000000000..79b1f8f6ea591 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/failure/AbstractFailureHandler.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.failure; + +import java.util.Collections; +import java.util.EnumSet; +import java.util.Set; +import org.apache.ignite.Ignite; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +import static org.apache.ignite.failure.FailureType.SYSTEM_CRITICAL_OPERATION_TIMEOUT; +import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_BLOCKED; + +/** + * Abstract superclass for {@link FailureHandler} implementations. 
+ * Maintains a set of ignored failure types. Failure handler will not invalidate kernal context for this failures + * and will not handle it. + */ +public abstract class AbstractFailureHandler implements FailureHandler { + /** */ + @GridToStringInclude + private Set ignoredFailureTypes = + Collections.unmodifiableSet(EnumSet.of(SYSTEM_WORKER_BLOCKED, SYSTEM_CRITICAL_OPERATION_TIMEOUT)); + + /** + * Sets failure types that must be ignored by failure handler. + * + * @param failureTypes Set of failure type that must be ignored. + * @see FailureType + */ + public void setIgnoredFailureTypes(Set failureTypes) { + ignoredFailureTypes = Collections.unmodifiableSet(failureTypes); + } + + /** + * Returns unmodifiable set of ignored failure types. + */ + public Set getIgnoredFailureTypes() { + return ignoredFailureTypes; + } + + /** {@inheritDoc} */ + public boolean onFailure(Ignite ignite, FailureContext failureCtx) { + return !ignoredFailureTypes.contains(failureCtx.type()) && handle(ignite, failureCtx); + } + + /** + * Actual failure handling. This method is not called for ignored failure types. + * + * @see #setIgnoredFailureTypes(Set). + * @see FailureHandler#onFailure(Ignite, FailureContext). + */ + protected abstract boolean handle(Ignite ignite, FailureContext failureCtx); + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(AbstractFailureHandler.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/failure/FailureType.java b/modules/core/src/main/java/org/apache/ignite/failure/FailureType.java index d933420fb1aa6..114e432e1ef73 100644 --- a/modules/core/src/main/java/org/apache/ignite/failure/FailureType.java +++ b/modules/core/src/main/java/org/apache/ignite/failure/FailureType.java @@ -27,6 +27,12 @@ public enum FailureType { /** System worker termination. */ SYSTEM_WORKER_TERMINATION, + /** System worker has not updated its heartbeat for a long time. 
*/ + SYSTEM_WORKER_BLOCKED, + /** Critical error - error which leads to the system's inoperability. */ - CRITICAL_ERROR + CRITICAL_ERROR, + + /** System-critical operation has been timed out. */ + SYSTEM_CRITICAL_OPERATION_TIMEOUT } diff --git a/modules/core/src/main/java/org/apache/ignite/failure/NoOpFailureHandler.java b/modules/core/src/main/java/org/apache/ignite/failure/NoOpFailureHandler.java index 2ec645eb1d5ea..67c258fe762bc 100644 --- a/modules/core/src/main/java/org/apache/ignite/failure/NoOpFailureHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/failure/NoOpFailureHandler.java @@ -18,13 +18,19 @@ package org.apache.ignite.failure; import org.apache.ignite.Ignite; +import org.apache.ignite.internal.util.typedef.internal.S; /** * Just ignores any failure. It's useful for tests and debugging. */ -public class NoOpFailureHandler implements FailureHandler { +public class NoOpFailureHandler extends AbstractFailureHandler { /** {@inheritDoc} */ - @Override public boolean onFailure(Ignite ignite, FailureContext failureCtx) { + @Override protected boolean handle(Ignite ignite, FailureContext failureCtx) { return false; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(NoOpFailureHandler.class, this, "super", super.toString()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/failure/RestartProcessFailureHandler.java b/modules/core/src/main/java/org/apache/ignite/failure/RestartProcessFailureHandler.java index 4f4432fc924c9..299b8afbb7b57 100644 --- a/modules/core/src/main/java/org/apache/ignite/failure/RestartProcessFailureHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/failure/RestartProcessFailureHandler.java @@ -20,15 +20,16 @@ import org.apache.ignite.Ignite; import org.apache.ignite.Ignition; import org.apache.ignite.internal.util.typedef.G; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; /** * This handler 
could be used only with ignite.(sh|bat) script. * Process will be terminated using {@link Ignition#restart(boolean)} call. */ -public class RestartProcessFailureHandler implements FailureHandler { +public class RestartProcessFailureHandler extends AbstractFailureHandler { /** {@inheritDoc} */ - @Override public boolean onFailure(Ignite ignite, FailureContext failureCtx) { + @Override protected boolean handle(Ignite ignite, FailureContext failureCtx) { new Thread( new Runnable() { @Override public void run() { @@ -42,4 +43,9 @@ public class RestartProcessFailureHandler implements FailureHandler { return true; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(RestartProcessFailureHandler.class, this, "super", super.toString()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/failure/StopNodeFailureHandler.java b/modules/core/src/main/java/org/apache/ignite/failure/StopNodeFailureHandler.java index f05cc760f2a7d..4721c5023523a 100644 --- a/modules/core/src/main/java/org/apache/ignite/failure/StopNodeFailureHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/failure/StopNodeFailureHandler.java @@ -19,14 +19,15 @@ import org.apache.ignite.Ignite; import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; /** * Handler will stop node in case of critical error using {@code IgnitionEx.stop(nodeName, true, true)} call. 
*/ -public class StopNodeFailureHandler implements FailureHandler { +public class StopNodeFailureHandler extends AbstractFailureHandler { /** {@inheritDoc} */ - @Override public boolean onFailure(Ignite ignite, FailureContext failureCtx) { + @Override public boolean handle(Ignite ignite, FailureContext failureCtx) { new Thread( new Runnable() { @Override public void run() { @@ -40,4 +41,9 @@ public class StopNodeFailureHandler implements FailureHandler { return true; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(StopNodeFailureHandler.class, this, "super", super.toString()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/failure/StopNodeOrHaltFailureHandler.java b/modules/core/src/main/java/org/apache/ignite/failure/StopNodeOrHaltFailureHandler.java index 4f7440616c097..023daa88806eb 100644 --- a/modules/core/src/main/java/org/apache/ignite/failure/StopNodeOrHaltFailureHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/failure/StopNodeOrHaltFailureHandler.java @@ -23,6 +23,7 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.Ignition; import org.apache.ignite.internal.IgnitionEx; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; /** @@ -30,7 +31,7 @@ * If node can't be stopped during provided {@code timeout} or {@code tryStop} value is {@code false} * then JVM process will be terminated forcibly using {@code Runtime.getRuntime().halt()}. */ -public class StopNodeOrHaltFailureHandler implements FailureHandler { +public class StopNodeOrHaltFailureHandler extends AbstractFailureHandler { /** Try stop. 
*/ private final boolean tryStop; @@ -54,7 +55,7 @@ public StopNodeOrHaltFailureHandler(boolean tryStop, long timeout) { } /** {@inheritDoc} */ - @Override public boolean onFailure(Ignite ignite, FailureContext failureCtx) { + @Override protected boolean handle(Ignite ignite, FailureContext failureCtx) { IgniteLogger log = ignite.log(); if (tryStop) { @@ -92,11 +93,16 @@ public StopNodeOrHaltFailureHandler(boolean tryStop, long timeout) { ).start(); } else { - U.error(log, "JVM will be halted immediately on ignite failure: [failureCtx=" + failureCtx + ']'); + U.error(log, "JVM will be halted immediately due to the failure: [failureCtx=" + failureCtx + ']'); Runtime.getRuntime().halt(Ignition.KILL_EXIT_CODE); } return true; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(StopNodeOrHaltFailureHandler.class, this, "super", super.toString()); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetricsMXBeanImpl.java index a242345df0fe5..c3bcf803f70ab 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ClusterLocalNodeMetricsMXBeanImpl.java @@ -18,9 +18,11 @@ package org.apache.ignite.internal; import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import org.apache.ignite.cluster.BaselineNode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.util.typedef.internal.S; @@ -332,6 +334,30 @@ public ClusterLocalNodeMetricsMXBeanImpl(GridDiscoveryManager discoMgr) { return node.metrics().getTotalNodes(); } + /** {@inheritDoc} */ + @Override public long getCurrentPmeDuration() { + return 
node.metrics().getCurrentPmeDuration(); + } + + /** {@inheritDoc} */ + @Override public int getTotalBaselineNodes() { + if (!node.isClient() && !node.isDaemon()) { + List baselineNodes = discoMgr.baselineNodes(discoMgr.topologyVersionEx()); + + if (baselineNodes != null) + for (BaselineNode baselineNode : baselineNodes) + if (baselineNode.consistentId().equals(node.consistentId())) + return 1; + } + + return 0; + } + + /** {@inheritDoc} */ + @Override public int getActiveBaselineNodes() { + return getTotalBaselineNodes(); + } + /** {@inheritDoc} */ @Override public int getTotalServerNodes() { return !node.isClient() ? 1 : 0; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsMXBeanImpl.java index e09ad3c110dc9..316ea71ca749c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsMXBeanImpl.java @@ -18,10 +18,13 @@ package org.apache.ignite.internal; import java.util.ArrayList; +import java.util.Collection; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.TreeSet; import java.util.UUID; +import org.apache.ignite.cluster.BaselineNode; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.cluster.ClusterNode; @@ -358,6 +361,40 @@ private ClusterMetrics metrics() { return metrics().getTotalNodes(); } + /** {@inheritDoc} */ + @Override public long getCurrentPmeDuration() { + return metrics().getCurrentPmeDuration(); + } + + /** {@inheritDoc} */ + @Override public int getTotalBaselineNodes() { + Collection baselineNodes = cluster.ignite().cluster().currentBaselineTopology(); + + return baselineNodes != null ? 
baselineNodes.size() : 0; + } + + /** {@inheritDoc} */ + @Override public int getActiveBaselineNodes() { + Collection baselineNodes = cluster.ignite().cluster().currentBaselineTopology(); + + if (baselineNodes != null && !baselineNodes.isEmpty()) { + Set bltIds = new HashSet<>(baselineNodes.size()); + + for (BaselineNode baselineNode : baselineNodes) + bltIds.add(baselineNode.consistentId()); + + int count = 0; + + for (ClusterNode node : cluster.forServers().nodes()) + if (bltIds.contains(node.consistentId())) + count++; + + return count; + } + + return 0; + } + /** {@inheritDoc} */ @Override public int getTotalServerNodes() { return cluster.forServers().nodes().size(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsSnapshot.java index 60f26a35b8b81..47fea3a9926c3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ClusterMetricsSnapshot.java @@ -92,7 +92,8 @@ public class ClusterMetricsSnapshot implements ClusterMetrics { 8/*received bytes count*/ + 4/*outbound messages queue size*/ + 4/*total nodes*/ + - 8/*total jobs execution time*/; + 8/*total jobs execution time*/ + + 8/*current PME time*/; /** */ private long lastUpdateTime = -1; @@ -256,6 +257,9 @@ public class ClusterMetricsSnapshot implements ClusterMetrics { /** */ private long totalJobsExecTime = -1; + /** */ + private long currentPmeDuration = -1; + /** * Create empty snapshot. 
*/ @@ -329,6 +333,7 @@ public ClusterMetricsSnapshot(ClusterGroup p) { outMesQueueSize = 0; heapTotal = 0; totalNodes = nodes.size(); + currentPmeDuration = 0; for (ClusterNode node : nodes) { ClusterMetrics m = node.metrics(); @@ -405,6 +410,8 @@ public ClusterMetricsSnapshot(ClusterGroup p) { outMesQueueSize += m.getOutboundMessagesQueueSize(); avgLoad += m.getCurrentCpuLoad(); + + currentPmeDuration = max(currentPmeDuration, m.getCurrentPmeDuration()); } curJobExecTime /= size; @@ -960,6 +967,11 @@ public void setCurrentIdleTime(long curIdleTime) { return totalNodes; } + /** {@inheritDoc} */ + @Override public long getCurrentPmeDuration() { + return currentPmeDuration; + } + /** * Sets available processors. * @@ -1194,6 +1206,16 @@ public void setTotalNodes(int totalNodes) { this.totalNodes = totalNodes; } + + /** + * Sets execution duration for current partition map exchange. + * + * @param currentPmeDuration Execution duration for current partition map exchange. + */ + public void setCurrentPmeDuration(long currentPmeDuration) { + this.currentPmeDuration = currentPmeDuration; + } + /** * @param neighborhood Cluster neighborhood. * @return CPU count. 
@@ -1346,6 +1368,7 @@ public static int serialize(byte[] data, int off, ClusterMetrics metrics) { buf.putInt(metrics.getOutboundMessagesQueueSize()); buf.putInt(metrics.getTotalNodes()); buf.putLong(metrics.getTotalJobsExecutionTime()); + buf.putLong(metrics.getCurrentPmeDuration()); assert !buf.hasRemaining() : "Invalid metrics size [expected=" + METRICS_SIZE + ", actual=" + (buf.position() - off) + ']'; @@ -1428,6 +1451,11 @@ public static ClusterMetrics deserialize(byte[] data, int off) { else metrics.setTotalJobsExecutionTime(0); + if (buf.remaining() >= 8) + metrics.setCurrentPmeDuration(buf.getLong()); + else + metrics.setCurrentPmeDuration(0); + return metrics; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java index 2cb3dfad5e487..6ce9001138b1b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/ComputeTaskInternalFuture.java @@ -234,7 +234,7 @@ public ComputeTaskSession getTaskSession() { /** {@inheritDoc} */ @Override public boolean cancel() throws IgniteCheckedException { - ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL, null); + ctx.security().authorize(ses.getTaskName(), SecurityPermission.TASK_CANCEL); if (onCancelled()) { ctx.task().onCancelled(ses.getId()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java b/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java index 4a33b9d089635..ac623ddf59007 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridDiagnostic.java @@ -61,14 +61,12 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL if (!locHost.isReachable(REACH_TIMEOUT)) { U.warn(log, "Default local 
host is unreachable. This may lead to delays on " + - "grid network operations. Check your OS network setting to correct it.", - "Default local host is unreachable."); + "grid network operations. Check your OS network setting to correct it."); } } catch (IOException ignore) { U.warn(log, "Failed to perform network diagnostics. It is usually caused by serious " + - "network configuration problem. Check your OS network setting to correct it.", - "Failed to perform network diagnostics."); + "network configuration problem. Check your OS network setting to correct it."); } } }); @@ -80,14 +78,12 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL if (locHost.isLoopbackAddress()) { U.warn(log, "Default local host is a loopback address. This can be a sign of " + - "potential network configuration problem.", - "Default local host is a loopback address."); + "potential network configuration problem."); } } catch (IOException ignore) { U.warn(log, "Failed to perform network diagnostics. It is usually caused by serious " + - "network configuration problem. Check your OS network setting to correct it.", - "Failed to perform network diagnostics."); + "network configuration problem. Check your OS network setting to correct it."); } } }); @@ -98,8 +94,7 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL if (!U.isSufficientlyTestedOs()) { U.warn(log, "This operating system has been tested less rigorously: " + U.osString() + ". Our team will appreciate the feedback if you experience any problems running " + - "ignite in this environment.", - "This OS is tested less rigorously: " + U.osString()); + "ignite in this environment."); } } }); @@ -109,8 +104,7 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL // Fix for GG-1075. if (F.isEmpty(U.allLocalMACs())) U.warn(log, "No live network interfaces detected. 
If IP-multicast discovery is used - " + - "make sure to add 127.0.0.1 as a local address.", - "No live network interfaces. Add 127.0.0.1 as a local address."); + "make sure to add 127.0.0.1 as a local address."); } }); @@ -131,7 +125,7 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL U.warn(log, "JMX remote management is enabled but JMX port is either not set or invalid. " + "Check system property 'com.sun.management.jmxremote.port' to make sure it specifies " + - "valid TCP/IP port.", "JMX remote port is invalid - JMX management is off."); + "valid TCP/IP port."); } } }); @@ -156,4 +150,4 @@ static void runBackgroundCheck(String igniteInstanceName, Executor exec, IgniteL "Failed to start background network diagnostics.", e); } } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java index ac568f065bcc7..7d9f74e5e6273 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridEventConsumeHandler.java @@ -44,6 +44,8 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; import org.apache.ignite.internal.processors.platform.PlatformEventFilterListener; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P2; import org.apache.ignite.internal.util.typedef.T2; @@ -92,6 +94,9 @@ class GridEventConsumeHandler implements GridContinuousHandler { /** Listener. */ private GridLocalEventListener lsnr; + /** P2P unmarshalling future. 
*/ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -142,6 +147,21 @@ public GridEventConsumeHandler() { // No-op. } + /** + * Performs remote filter initialization. + * + * @param filter Remote filter. + * @param ctx Kernal context. + * @throws IgniteCheckedException In case if initialization failed. + */ + private void initFilter(IgnitePredicate filter, GridKernalContext ctx) throws IgniteCheckedException { + if (filter != null) + ctx.resource().injectGeneric(filter); + + if (filter instanceof PlatformEventFilterListener) + ((PlatformEventFilterListener)filter).initialize(ctx); + } + /** {@inheritDoc} */ @Override public RegisterStatus register(final UUID nodeId, final UUID routineId, final GridKernalContext ctx) throws IgniteCheckedException { @@ -152,12 +172,6 @@ public GridEventConsumeHandler() { if (cb != null) ctx.resource().injectGeneric(cb); - if (filter != null) - ctx.resource().injectGeneric(filter); - - if (filter instanceof PlatformEventFilterListener) - ((PlatformEventFilterListener)filter).initialize(ctx); - final boolean loc = nodeId.equals(ctx.localNodeId()); lsnr = new GridLocalEventListener() { @@ -257,7 +271,18 @@ public GridEventConsumeHandler() { if (F.isEmpty(types)) types = EVTS_ALL; - ctx.event().addLocalEventListener(lsnr, types); + p2pUnmarshalFut.listen((fut) -> { + if (fut.error() == null) { + try { + initFilter(filter, ctx); + } + catch (IgniteCheckedException e) { + throw F.wrap(e); + } + + ctx.event().addLocalEventListener(lsnr, types); + } + }); return RegisterStatus.REGISTERED; } @@ -382,13 +407,22 @@ public GridEventConsumeHandler() { assert ctx.config().isPeerClassLoadingEnabled(); if (filterBytes != null) { - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = 
ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); - filter = U.unmarshal(ctx, filterBytes, U.resolveClassLoader(dep.classLoader(), ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); + } + catch (IgniteCheckedException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } } } @@ -449,6 +483,7 @@ public GridEventConsumeHandler() { boolean b = in.readBoolean(); if (b) { + p2pUnmarshalFut = new GridFutureAdapter<>(); filterBytes = U.readByteArray(in); clsName = U.readString(in); depInfo = (GridDeploymentInfo)in.readObject(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobCancelRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobCancelRequest.java index aaa69eaff7190..ac3a87336fc9f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobCancelRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobCancelRequest.java @@ -201,4 +201,4 @@ public boolean system() { @Override public String toString() { return S.toString(GridJobCancelRequest.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java index 4357d1da07dce..ebfeb0153ffdc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteRequest.java @@ -664,7 +664,7 @@ public 
AffinityTopologyVersion getTopVer() { writer.incrementState(); case 24: - if (!writer.writeMessage("topVer", topVer)) + if (!writer.writeAffinityTopologyVersion("topVer", topVer)) return false; writer.incrementState(); @@ -885,7 +885,7 @@ public AffinityTopologyVersion getTopVer() { reader.incrementState(); case 24: - topVer = reader.readMessage("topVer"); + topVer = reader.readAffinityTopologyVersion("topVer"); if (!reader.isLastRead()) return false; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteResponse.java index 312435e922750..f052edf07d446 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobExecuteResponse.java @@ -282,7 +282,7 @@ public AffinityTopologyVersion getRetryTopologyVersion() { writer.incrementState(); case 6: - if (!writer.writeMessage("retry", retry)) + if (!writer.writeAffinityTopologyVersion("retry", retry)) return false; writer.incrementState(); @@ -355,7 +355,7 @@ public AffinityTopologyVersion getRetryTopologyVersion() { reader.incrementState(); case 6: - retry = reader.readMessage("retry"); + retry = reader.readAffinityTopologyVersion("retry"); if (!reader.isLastRead()) return false; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsRequest.java index 8a11cef33aa21..d743a355f8d8e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsRequest.java @@ -161,4 +161,4 @@ public byte[] topicBytes() { @Override public String toString() { return S.toString(GridJobSiblingsRequest.class, this); } -} \ No newline at end of file +} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsResponse.java index 3911446d2b86b..dc59ab5f3057c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridJobSiblingsResponse.java @@ -141,4 +141,4 @@ public void unmarshalSiblings(Marshaller marsh) throws IgniteCheckedException { @Override public String toString() { return S.toString(GridJobSiblingsResponse.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java index 0b40054ec2606..53c7230b79277 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContext.java @@ -43,6 +43,7 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -60,17 +61,19 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import 
org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.processors.task.GridTaskProcessor; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; +import org.apache.ignite.internal.stat.IoStatisticsManager; import org.apache.ignite.internal.suggestions.GridPerformanceSuggestions; import org.apache.ignite.internal.util.IgniteExceptionRegistry; import org.apache.ignite.internal.util.StripedExecutor; import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.worker.WorkersRegistry; import org.apache.ignite.plugin.PluginNotFoundException; import org.apache.ignite.plugin.PluginProvider; import org.apache.ignite.thread.IgniteStripedThreadPoolExecutor; @@ -402,11 +405,11 @@ public interface GridKernalContext extends Iterable { public GridCollisionManager collision(); /** - * Gets authentication processor. + * Gets instance of {@link IgniteSecurity}. * - * @return Authentication processor. + * @return Ignite security. */ - public GridSecurityProcessor security(); + public IgniteSecurity security(); /** * Gets load balancing manager. @@ -422,6 +425,13 @@ public interface GridKernalContext extends Iterable { */ public GridIndexingManager indexing(); + /** + * Gets workers registry. + * + * @return Workers registry. + */ + public WorkersRegistry workersRegistry(); + /** * Gets data structures processor. * @@ -429,6 +439,20 @@ public interface GridKernalContext extends Iterable { */ public DataStructuresProcessor dataStructures(); + /** + * Gets long JVM pause detector. + * + * @return Long JVM pause detector. + */ + public LongJVMPauseDetector longJvmPauseDetector(); + + /** + * Gets diagnostic processor. 
+ * + * @return Diagnostic processor. + */ + public DiagnosticProcessor diagnostic(); + /** * Checks whether this node is invalid due to a critical error or not. * @@ -436,6 +460,13 @@ public interface GridKernalContext extends Iterable { */ public boolean invalid(); + /** + * Checks whether this node detected its segmentation from the rest of the grid. + * + * @return {@code True} if this node has segmented, {@code false} otherwise. + */ + public boolean segmented(); + /** * Gets failure processor. */ @@ -664,4 +695,19 @@ public interface GridKernalContext extends Iterable { * @return subscription processor to manage internal-only (strict node-local) subscriptions between components. */ public GridInternalSubscriptionProcessor internalSubscriptionProcessor(); + + /** + * @return IO statistic manager. + */ + public IoStatisticsManager ioStats(); + + /** + * @return Default uncaught exception handler used by thread pools. + */ + public Thread.UncaughtExceptionHandler uncaughtExceptionHandler(); + + /** + * @return {@code True} if node is in recovery mode (before join to topology). 
+ */ + public boolean recoveryMode(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java index 34083340bfefe..486888448206d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridKernalContextImpl.java @@ -37,6 +37,7 @@ import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.managers.checkpoint.GridCheckpointManager; import org.apache.ignite.internal.managers.collision.GridCollisionManager; import org.apache.ignite.internal.managers.communication.GridIoManager; @@ -59,6 +60,7 @@ import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor; import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.processors.hadoop.HadoopHelper; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; @@ -78,13 +80,14 @@ import org.apache.ignite.internal.processors.resource.GridResourceProcessor; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.schedule.IgniteScheduleProcessorAdapter; -import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import 
org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; import org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor; import org.apache.ignite.internal.processors.task.GridTaskProcessor; import org.apache.ignite.internal.processors.timeout.GridTimeoutProcessor; +import org.apache.ignite.internal.stat.IoStatisticsManager; import org.apache.ignite.internal.suggestions.GridPerformanceSuggestions; import org.apache.ignite.internal.util.IgniteExceptionRegistry; import org.apache.ignite.internal.util.StripedExecutor; @@ -94,6 +97,7 @@ import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.worker.WorkersRegistry; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.plugin.PluginNotFoundException; import org.apache.ignite.plugin.PluginProvider; @@ -153,7 +157,7 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** */ @GridToStringExclude - private GridSecurityProcessor securityProc; + private IgniteSecurity security; /** */ @GridToStringExclude @@ -292,6 +296,10 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable @GridToStringExclude private IgniteAuthenticationProcessor authProc; + /** Diagnostic processor. 
*/ + @GridToStringInclude + private DiagnosticProcessor diagnosticProcessor; + /** */ @GridToStringExclude private List comps = new LinkedList<>(); @@ -360,6 +368,17 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable @GridToStringExclude private Map attrs = new HashMap<>(); + /** */ + @GridToStringExclude + private WorkersRegistry workersRegistry; + + /** */ + @GridToStringExclude + private LongJVMPauseDetector pauseDetector; + + /** */ + private Thread.UncaughtExceptionHandler hnd; + /** */ private IgniteEx grid; @@ -396,6 +415,12 @@ public class GridKernalContextImpl implements GridKernalContext, Externalizable /** Failure processor. */ private FailureProcessor failureProc; + /** Recovery mode flag. Flag is set to {@code false} when discovery manager started. */ + private boolean recoveryMode = true; + + /** IO statistics manager. */ + private IoStatisticsManager ioStatMgr; + /** * No-arg constructor is required by externalization. */ @@ -426,6 +451,9 @@ public GridKernalContextImpl() { * @param schemaExecSvc Schema executor service. * @param customExecSvcs Custom named executors. * @param plugins Plugin providers. + * @param workerRegistry Worker registry. + * @param hnd Default uncaught exception handler used by thread pools. + * @param pauseDetector Long JVM pause detector. 
*/ @SuppressWarnings("TypeMayBeWeakened") protected GridKernalContextImpl( @@ -450,7 +478,10 @@ protected GridKernalContextImpl( ExecutorService schemaExecSvc, @Nullable Map customExecSvcs, List plugins, - IgnitePredicate clsFilter + IgnitePredicate clsFilter, + WorkersRegistry workerRegistry, + Thread.UncaughtExceptionHandler hnd, + LongJVMPauseDetector pauseDetector ) { assert grid != null; assert cfg != null; @@ -475,6 +506,9 @@ protected GridKernalContextImpl( this.qryExecSvc = qryExecSvc; this.schemaExecSvc = schemaExecSvc; this.customExecSvcs = customExecSvcs; + this.workersRegistry = workerRegistry; + this.hnd = hnd; + this.pauseDetector = pauseDetector; marshCtx = new MarshallerContextImpl(plugins, clsFilter); @@ -486,6 +520,8 @@ protected GridKernalContextImpl( log.debug("Failed to load spring component, will not be able to extract userVersion from " + "META-INF/ignite.xml."); } + + ioStatMgr = new IoStatisticsManager(); } /** {@inheritDoc} */ @@ -531,8 +567,6 @@ else if (comp instanceof GridFailoverManager) failoverMgr = (GridFailoverManager)comp; else if (comp instanceof GridCollisionManager) colMgr = (GridCollisionManager)comp; - else if (comp instanceof GridSecurityProcessor) - securityProc = (GridSecurityProcessor)comp; else if (comp instanceof GridLoadBalancerManager) loadMgr = (GridLoadBalancerManager)comp; else if (comp instanceof GridIndexingManager) @@ -607,6 +641,10 @@ else if (comp instanceof GridInternalSubscriptionProcessor) internalSubscriptionProc = (GridInternalSubscriptionProcessor)comp; else if (comp instanceof IgniteAuthenticationProcessor) authProc = (IgniteAuthenticationProcessor)comp; + else if (comp instanceof IgniteSecurity) + security = (IgniteSecurity)comp; + else if (comp instanceof DiagnosticProcessor) + diagnosticProcessor = (DiagnosticProcessor)comp; else if (!(comp instanceof DiscoveryNodeValidationProcessor || comp instanceof PlatformPluginProcessor)) assert (comp instanceof GridPluginComponent) : "Unknown manager class: " 
+ comp.getClass(); @@ -765,8 +803,8 @@ else if (helper instanceof HadoopHelper) } /** {@inheritDoc} */ - @Override public GridSecurityProcessor security() { - return securityProc; + @Override public IgniteSecurity security() { + return security; } /** {@inheritDoc} */ @@ -779,6 +817,11 @@ else if (helper instanceof HadoopHelper) return indexingMgr; } + /** {@inheritDoc} */ + @Override public WorkersRegistry workersRegistry() { + return workersRegistry; + } + /** {@inheritDoc} */ @Override public GridAffinityProcessor affinity() { return affProc; @@ -885,6 +928,16 @@ else if (helper instanceof HadoopHelper) return perf; } + /** {@inheritDoc} */ + @Override public LongJVMPauseDetector longJvmPauseDetector() { + return pauseDetector; + } + + /** {@inheritDoc} */ + @Override public DiagnosticProcessor diagnostic() { + return diagnosticProcessor; + } + /** {@inheritDoc} */ @Override public void printMemoryStats() { X.println(">>> "); @@ -1096,6 +1149,11 @@ protected Object readResolve() throws ObjectStreamException { return internalSubscriptionProc; } + /** {@inheritDoc} */ + @Override public IoStatisticsManager ioStats() { + return ioStatMgr; + } + /** * @param disconnected Disconnected flag. 
*/ @@ -1112,7 +1170,18 @@ void disconnected(boolean disconnected) { @Override public boolean invalid() { FailureProcessor failureProc = failure(); - return failureProc != null && failureProc.failureContext() != null; + return failureProc != null + && failureProc.failureContext() != null + && failureProc.failureContext().type() != FailureType.SEGMENTATION; + } + + /** {@inheritDoc} */ + @Override public boolean segmented() { + FailureProcessor failureProc = failure(); + + return failureProc != null + && failureProc.failureContext() != null + && failureProc.failureContext().type() == FailureType.SEGMENTATION; } /** {@inheritDoc} */ @@ -1120,6 +1189,23 @@ void disconnected(boolean disconnected) { return failureProc; } + /** {@inheritDoc} */ + public Thread.UncaughtExceptionHandler uncaughtExceptionHandler() { + return hnd; + } + + /** {@inheritDoc} */ + @Override public boolean recoveryMode() { + return recoveryMode; + } + + /** + * @param recoveryMode Recovery mode. + */ + public void recoveryMode(boolean recoveryMode) { + this.recoveryMode = recoveryMode; + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(GridKernalContextImpl.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java index c146eca255aba..688ca17fad53b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridMessageListenHandler.java @@ -25,12 +25,15 @@ import java.util.Map; import java.util.UUID; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.internal.managers.deployment.GridDeployment; import org.apache.ignite.internal.managers.deployment.GridDeploymentInfoBean; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import 
org.apache.ignite.internal.processors.continuous.GridContinuousBatch; import org.apache.ignite.internal.processors.continuous.GridContinuousBatchAdapter; import org.apache.ignite.internal.processors.continuous.GridContinuousHandler; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridPeerDeployAware; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.S; @@ -66,6 +69,9 @@ public class GridMessageListenHandler implements GridContinuousHandler { /** */ private boolean depEnabled; + /** P2P unmarshalling future. */ + private IgniteInternalFuture p2pUnmarshalFut = new GridFinishedFuture<>(); + /** * Required by {@link Externalizable}. */ @@ -84,22 +90,6 @@ public GridMessageListenHandler(@Nullable Object topic, IgniteBiPredicate { + if (fut.error() == null) + ctx.io().addUserMessageListener(topic, pred, nodeId); + }); return RegisterStatus.REGISTERED; } @@ -180,18 +172,27 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { assert ctx != null; assert ctx.config().isPeerClassLoadingEnabled(); - GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, - depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); + try { + GridDeployment dep = ctx.deploy().getGlobalDeployment(depInfo.deployMode(), clsName, clsName, + depInfo.userVersion(), nodeId, depInfo.classLoaderId(), depInfo.participants(), null); - if (dep == null) - throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + if (dep == null) + throw new IgniteDeploymentCheckedException("Failed to obtain deployment for class: " + clsName); + + ClassLoader ldr = dep.classLoader(); - ClassLoader ldr = dep.classLoader(); + if (topicBytes != null) + topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, 
ctx.config())); - if (topicBytes != null) - topic = U.unmarshal(ctx, topicBytes, U.resolveClassLoader(ldr, ctx.config())); + pred = U.unmarshal(ctx, predBytes, U.resolveClassLoader(ldr, ctx.config())); + } + catch (IgniteCheckedException | IgniteException e) { + ((GridFutureAdapter)p2pUnmarshalFut).onDone(e); + + throw e; + } - pred = U.unmarshal(ctx, predBytes, U.resolveClassLoader(ldr, ctx.config())); + ((GridFutureAdapter)p2pUnmarshalFut).onDone(); } /** {@inheritDoc} */ @@ -250,6 +251,7 @@ public GridMessageListenHandler(GridMessageListenHandler orig) { depEnabled = in.readBoolean(); if (depEnabled) { + p2pUnmarshalFut = new GridFutureAdapter<>(); topicBytes = U.readByteArray(in); predBytes = U.readByteArray(in); clsName = U.readString(in); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridPluginComponent.java b/modules/core/src/main/java/org/apache/ignite/internal/GridPluginComponent.java index 1e49e5c410b89..61dad17efb6ec 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridPluginComponent.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridPluginComponent.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal; +import java.io.Serializable; +import java.util.Map; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.lang.IgniteFuture; @@ -82,7 +84,7 @@ public PluginProvider plugin() { /** {@inheritDoc} */ @Nullable @Override public DiscoveryDataExchangeType discoveryDataType() { - return null; + return DiscoveryDataExchangeType.PLUGIN; } /** {@inheritDoc} */ @@ -107,8 +109,19 @@ public PluginProvider plugin() { /** {@inheritDoc} */ @Nullable @Override public IgniteNodeValidationResult validateNode(ClusterNode node) { + return null; + } + + /** {@inheritDoc} */ + @Nullable @Override public IgniteNodeValidationResult validateNode(ClusterNode node, + JoiningNodeDiscoveryData discoData) { try { - plugin.validateNewNode(node); + Map map = 
(Map)discoData.joiningNodeData(); + + if (map != null) + plugin.validateNewNode(node, map.get(plugin.name())); + else + plugin.validateNewNode(node, null); return null; } @@ -117,11 +130,6 @@ public PluginProvider plugin() { } } - /** {@inheritDoc} */ - @Nullable @Override public IgniteNodeValidationResult validateNode(ClusterNode node, JoiningNodeDiscoveryData discoData) { - return null; - } - /** {@inheritDoc} */ @Override public void printMemoryStats() { // No-op. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskCancelRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskCancelRequest.java index 273d0a777a468..71c318b537dd5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskCancelRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskCancelRequest.java @@ -124,4 +124,4 @@ public IgniteUuid sessionId() { @Override public String toString() { return S.toString(GridTaskCancelRequest.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java index ce6e8313abdd0..ba3e6c0845c40 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionImpl.java @@ -294,7 +294,7 @@ public boolean release() { if (timeout == 0) timeout = Long.MAX_VALUE; - long now = U.currentTimeMillis(); + long now = System.currentTimeMillis(); // Prevent overflow. long end = now + timeout < 0 ? 
Long.MAX_VALUE : now + timeout; @@ -303,11 +303,17 @@ public boolean release() { if (end > endTime) end = endTime; + timeout = end - now; + + long startNanos = System.nanoTime(); + synchronized (mux) { - while (!closed && (attrs == null || !attrs.containsKey(key)) && now < end) { - mux.wait(end - now); + long passedMillis = 0L; + + while (!closed && (attrs == null || !attrs.containsKey(key)) && passedMillis < timeout) { + mux.wait(timeout - passedMillis); - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } if (closed) @@ -326,7 +332,7 @@ public boolean release() { if (timeout == 0) timeout = Long.MAX_VALUE; - long now = U.currentTimeMillis(); + long now = System.currentTimeMillis(); // Prevent overflow. long end = now + timeout < 0 ? Long.MAX_VALUE : now + timeout; @@ -335,13 +341,19 @@ public boolean release() { if (end > endTime) end = endTime; + timeout = end - now; + + long startNanos = System.nanoTime(); + synchronized (mux) { boolean isFound = false; - while (!closed && !(isFound = isAttributeSet(key, val)) && now < end) { - mux.wait(end - now); + long passedMillis = 0L; + + while (!closed && !(isFound = isAttributeSet(key, val)) && passedMillis < timeout) { + mux.wait(timeout - passedMillis); - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } if (closed) @@ -364,7 +376,7 @@ public boolean release() { if (timeout == 0) timeout = Long.MAX_VALUE; - long now = U.currentTimeMillis(); + long now = System.currentTimeMillis(); // Prevent overflow. long end = now + timeout < 0 ? 
Long.MAX_VALUE : now + timeout; @@ -373,11 +385,17 @@ public boolean release() { if (end > endTime) end = endTime; + timeout = end - now; + + long startNanos = System.nanoTime(); + synchronized (mux) { - while (!closed && (attrs == null || !attrs.keySet().containsAll(keys)) && now < end) { - mux.wait(end - now); + long passedMillis = 0L; + + while (!closed && (attrs == null || !attrs.keySet().containsAll(keys)) && passedMillis < timeout) { + mux.wait(timeout - passedMillis); - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } if (closed) @@ -405,7 +423,7 @@ public boolean release() { if (timeout == 0) timeout = Long.MAX_VALUE; - long now = U.currentTimeMillis(); + long now = System.currentTimeMillis(); // Prevent overflow. long end = now + timeout < 0 ? Long.MAX_VALUE : now + timeout; @@ -414,18 +432,24 @@ public boolean release() { if (end > endTime) end = endTime; + timeout = end - now; + + long startNanos = System.nanoTime(); + synchronized (mux) { boolean isFound = false; - while (!closed && now < end) { + long passedMillis = 0L; + + while (!closed && passedMillis < timeout) { isFound = this.attrs != null && this.attrs.entrySet().containsAll(attrs.entrySet()); if (isFound) break; - mux.wait(end - now); + mux.wait(timeout - passedMillis); - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } if (closed) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionRequest.java index dbac893189e38..576392e097fc3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTaskSessionRequest.java @@ -189,4 +189,4 @@ public IgniteUuid getJobId() { @Override public String toString() { return S.toString(GridTaskSessionRequest.class, this); } -} \ No newline at end of file +} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java b/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java index 4932e671376dc..0b2d41a39ec89 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/GridTopic.java @@ -121,7 +121,13 @@ public enum GridTopic { TOPIC_WAL, /** */ - TOPIC_AUTH; + TOPIC_METRICS, + + /** */ + TOPIC_AUTH, + + /** */ + TOPIC_EXCHANGE; /** Enum values. */ private static final GridTopic[] VALS = values(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java index 030e2dbe05956..f19fd4e56142f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteEventsImpl.java @@ -172,6 +172,9 @@ public IgniteEventsImpl(GridKernalContext ctx, ClusterGroupAdapter prj, boolean autoUnsubscribe, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java new file mode 100644 index 0000000000000..e586a60a44732 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteFeatures.java @@ -0,0 +1,177 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import java.util.BitSet; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; +import org.apache.ignite.spi.communication.tcp.messages.HandshakeWaitMessage; + +import static org.apache.ignite.IgniteSystemProperties.getBoolean; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_FEATURES; + +/** + * Defines supported features and check its on other nodes. + */ +public enum IgniteFeatures { + /** + * Support of {@link HandshakeWaitMessage} by {@link TcpCommunicationSpi}. + */ + TCP_COMMUNICATION_SPI_HANDSHAKE_WAIT_MESSAGE(0), + + /** Cache metrics v2 support. */ + CACHE_METRICS_V2(1), + + /** Data paket compression. */ + DATA_PACKET_COMPRESSION(3), + + /** Support of different rebalance size for nodes. */ + DIFFERENT_REBALANCE_POOL_SIZE(4), + + /** + * Support of providing thread dump of thread that started transaction. Used for dumping + * long running transactions. + */ + TRANSACTION_OWNER_THREAD_DUMP_PROVIDING(6), + + + /** Displaying versbose transaction information: --info option of --tx control script command. */ + TX_INFO_COMMAND(7), + + /** Command which allow to detect and cleanup garbage which could left after destroying caches in shared groups */ + FIND_AND_DELETE_GARBAGE_COMMAND(8), + + /** Supports tracking update counter for transactions. */ + TX_TRACKING_UPDATE_COUNTER(12), + + /** Distributed metastorage. 
*/ + IGNITE_SECURITY_PROCESSOR(13), + + /** Replacing TcpDiscoveryNode field with nodeId field in discovery messages. */ + TCP_DISCOVERY_MESSAGE_NODE_COMPACT_REPRESENTATION(14), + + /** LRT system and user time dump settings. */ + LRT_SYSTEM_USER_TIME_DUMP_SETTINGS(18), + + /** + * A mode when data nodes throttle update rate regarding to DR sender load + */ + DR_DATA_NODE_SMART_THROTTLING(19), + + /** + * Support enabling DR events from Web Console. + */ + WC_DR_EVENTS(20) + ; + + /** + * Unique feature identifier. + */ + private final int featureId; + + /** + * @param featureId Feature ID. + */ + IgniteFeatures(int featureId) { + this.featureId = featureId; + } + + /** + * @return Feature ID. + */ + public int getFeatureId() { + return featureId; + } + + /** + * Checks that feature supported by node. + * + * @param clusterNode Cluster node to check. + * @param feature Feature to check. + * @return {@code True} if feature is declared to be supported by remote node. + */ + public static boolean nodeSupports(ClusterNode clusterNode, IgniteFeatures feature) { + final byte[] features = clusterNode.attribute(ATTR_IGNITE_FEATURES); + + if (features == null) + return false; + + return nodeSupports(features, feature); + } + + /** + * Checks that feature supported by node. + * + * @param featuresAttrBytes Byte array value of supported features node attribute. + * @param feature Feature to check. + * @return {@code True} if feature is declared to be supported by remote node. + */ + public static boolean nodeSupports(byte[] featuresAttrBytes, IgniteFeatures feature) { + int featureId = feature.getFeatureId(); + + // Same as "BitSet.valueOf(features).get(featureId)" + + int byteIdx = featureId >>> 3; + + if (byteIdx >= featuresAttrBytes.length) + return false; + + int bitIdx = featureId & 0x7; + + return (featuresAttrBytes[byteIdx] & (1 << bitIdx)) != 0; + } + + /** + * Checks that feature supported by all nodes. 
+ * + * @param nodes cluster nodes to check their feature support. + * @return if feature is declared to be supported by all nodes + */ + public static boolean allNodesSupports(Iterable nodes, IgniteFeatures feature) { + for (ClusterNode next : nodes) { + if (!nodeSupports(next, feature)) + return false; + } + + return true; + } + + /** + * Features supported by the current node. + * + * @return Byte array representing all supported features by current node. + */ + public static byte[] allFeatures() { + final BitSet set = new BitSet(); + + for (IgniteFeatures value : IgniteFeatures.values()) { + // After rolling upgrade, our security has more strict validation. This may come as a surprise to customers. + if (IGNITE_SECURITY_PROCESSOR == value && !getBoolean(IGNITE_SECURITY_PROCESSOR.name(), false)) + continue; + + final int featureId = value.getFeatureId(); + + assert !set.get(featureId) : "Duplicate feature ID found for [" + value + "] having same ID [" + + featureId + "]"; + + set.set(featureId); + } + + return set.toByteArray(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java index 8bc46fd1db2f6..486b7d471eba5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteKernal.java @@ -17,22 +17,23 @@ package org.apache.ignite.internal; -import java.io.BufferedReader; import java.io.Externalizable; import java.io.File; -import java.io.FileInputStream; -import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.InvalidObjectException; import java.io.ObjectInput; import java.io.ObjectOutput; import java.io.ObjectStreamException; import java.io.Serializable; +import java.io.UncheckedIOException; import java.lang.management.ManagementFactory; import 
java.lang.management.RuntimeMXBean; import java.lang.reflect.Constructor; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; import java.text.DateFormat; import java.text.DecimalFormat; import java.util.ArrayList; @@ -41,12 +42,10 @@ import java.util.Collections; import java.util.Date; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.ListIterator; import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.UUID; import java.util.concurrent.ExecutorService; import java.util.concurrent.ThreadPoolExecutor; @@ -54,7 +53,6 @@ import java.util.concurrent.atomic.AtomicReference; import javax.cache.CacheException; import javax.management.JMException; -import javax.management.ObjectName; import org.apache.ignite.DataRegionMetrics; import org.apache.ignite.DataRegionMetricsAdapter; import org.apache.ignite.DataStorageMetrics; @@ -99,13 +97,14 @@ import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.MemoryConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; +import org.apache.ignite.events.EventType; import org.apache.ignite.internal.binary.BinaryEnumCache; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.binary.BinaryUtils; import org.apache.ignite.internal.cluster.ClusterGroupAdapter; import org.apache.ignite.internal.cluster.IgniteClusterEx; -import org.apache.ignite.internal.processors.failure.FailureProcessor; import org.apache.ignite.internal.managers.GridManager; +import org.apache.ignite.internal.managers.IgniteMBeansManager; import org.apache.ignite.internal.managers.checkpoint.GridCheckpointManager; import org.apache.ignite.internal.managers.collision.GridCollisionManager; import org.apache.ignite.internal.managers.communication.GridIoManager; @@ -118,6 +117,7 @@ import 
org.apache.ignite.internal.managers.loadbalancer.GridLoadBalancerManager; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.internal.processors.GridProcessor; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityProcessor; import org.apache.ignite.internal.processors.authentication.IgniteAuthenticationProcessor; import org.apache.ignite.internal.processors.cache.CacheConfigurationOverride; @@ -125,7 +125,6 @@ import org.apache.ignite.internal.processors.cache.GridCacheContext; import org.apache.ignite.internal.processors.cache.GridCacheProcessor; import org.apache.ignite.internal.processors.cache.GridCacheUtilityKey; -import org.apache.ignite.internal.processors.cache.GridCacheUtils; import org.apache.ignite.internal.processors.cache.IgniteCacheProxy; import org.apache.ignite.internal.processors.cache.IgniteInternalCache; import org.apache.ignite.internal.processors.cache.binary.CacheObjectBinaryProcessorImpl; @@ -134,11 +133,14 @@ import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.closure.GridClosureProcessor; import org.apache.ignite.internal.processors.cluster.ClusterProcessor; +import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; import org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor; import org.apache.ignite.internal.processors.cluster.IGridClusterStateProcessor; import org.apache.ignite.internal.processors.continuous.GridContinuousProcessor; import org.apache.ignite.internal.processors.datastreamer.DataStreamProcessor; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; +import org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor; +import org.apache.ignite.internal.processors.failure.FailureProcessor; import 
org.apache.ignite.internal.processors.hadoop.Hadoop; import org.apache.ignite.internal.processors.hadoop.HadoopProcessorAdapter; import org.apache.ignite.internal.processors.job.GridJobProcessor; @@ -159,6 +161,9 @@ import org.apache.ignite.internal.processors.resource.GridSpringResourceContext; import org.apache.ignite.internal.processors.rest.GridRestProcessor; import org.apache.ignite.internal.processors.security.GridSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurityProcessor; +import org.apache.ignite.internal.processors.security.IgniteSecurity; +import org.apache.ignite.internal.processors.security.NoOpIgniteSecurityProcessor; import org.apache.ignite.internal.processors.segmentation.GridSegmentationProcessor; import org.apache.ignite.internal.processors.service.GridServiceProcessor; import org.apache.ignite.internal.processors.session.GridTaskSessionProcessor; @@ -169,6 +174,7 @@ import org.apache.ignite.internal.suggestions.JvmConfigurationSuggestions; import org.apache.ignite.internal.suggestions.OsConfigurationSuggestions; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.future.GridCompoundFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; @@ -185,6 +191,7 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.worker.WorkersRegistry; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgnitePredicate; @@ -193,11 +200,9 @@ import org.apache.ignite.lifecycle.LifecycleBean; import org.apache.ignite.lifecycle.LifecycleEventType; import org.apache.ignite.marshaller.MarshallerExclusions; +import 
org.apache.ignite.marshaller.MarshallerUtils; import org.apache.ignite.marshaller.jdk.JdkMarshaller; -import org.apache.ignite.mxbean.ClusterMetricsMXBean; import org.apache.ignite.mxbean.IgniteMXBean; -import org.apache.ignite.mxbean.StripedExecutorMXBean; -import org.apache.ignite.mxbean.ThreadPoolMXBean; import org.apache.ignite.plugin.IgnitePlugin; import org.apache.ignite.plugin.PluginNotFoundException; import org.apache.ignite.plugin.PluginProvider; @@ -210,12 +215,14 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_BINARY_MARSHALLER_USE_STRING_SERIALIZATION_VER_2; import static org.apache.ignite.IgniteSystemProperties.IGNITE_CONFIG_URL; import static org.apache.ignite.IgniteSystemProperties.IGNITE_DAEMON; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP; import static org.apache.ignite.IgniteSystemProperties.IGNITE_NO_ASCII; import static org.apache.ignite.IgniteSystemProperties.IGNITE_OPTIMIZED_MARSHALLER_USE_DEFAULT_SUID; import static org.apache.ignite.IgniteSystemProperties.IGNITE_REST_START_ON_CLIENT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SKIP_CONFIGURATION_CONSISTENCY_CHECK; import static org.apache.ignite.IgniteSystemProperties.IGNITE_STARVATION_CHECK_INTERVAL; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SUCCESS_FILE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_USE_POOL_FOR_LAZY_QUERIES; import static org.apache.ignite.IgniteSystemProperties.getBoolean; import static org.apache.ignite.IgniteSystemProperties.snapshot; import static org.apache.ignite.internal.GridKernalState.DISCONNECTED; @@ -235,6 +242,8 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DATA_STORAGE_CONFIG; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DATA_STREAMER_POOL_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DEPLOYMENT_MODE; +import static 
org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DYNAMIC_CACHE_START_ROLLBACK_SUPPORTED; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_FEATURES; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IGNITE_INSTANCE_NAME; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_IPS; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_JIT_NAME; @@ -254,11 +263,13 @@ import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PEER_CLASSLOADING; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PHY_RAM; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_PREFIX; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_REBALANCE_POOL_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_RESTART_ENABLED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_REST_PORT_RANGE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_SPI_CLASS; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_TX_CONFIG; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USER_NAME; +import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_USE_POOL_FOR_LAZY_QUERIES; import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR; import static org.apache.ignite.internal.IgniteVersionUtils.BUILD_TSTAMP_STR; import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT; @@ -267,8 +278,6 @@ import static org.apache.ignite.internal.IgniteVersionUtils.VER_STR; import static org.apache.ignite.lifecycle.LifecycleEventType.AFTER_NODE_START; import static org.apache.ignite.lifecycle.LifecycleEventType.BEFORE_NODE_START; -import static org.apache.ignite.marshaller.MarshallerUtils.CLS_NAMES_FILE; -import static org.apache.ignite.marshaller.MarshallerUtils.JDK_CLS_NAMES_FILE; /** * Ignite kernal. 
@@ -292,9 +301,14 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { /** Force complete reconnect future. */ private static final Object STOP_RECONNECT = new Object(); - static { - LongJVMPauseDetector.start(); - } + /** Separator for formatted coordinator properties. */ + public static final String COORDINATOR_PROPERTIES_SEPARATOR = ","; + + /** Default long operations dump timeout. */ + public static final long DFLT_LONG_OPERATIONS_DUMP_TIMEOUT = 60_000L; + + /** Long jvm pause detector. */ + private LongJVMPauseDetector longJVMPauseDetector; /** */ @GridToStringExclude @@ -302,7 +316,7 @@ public class IgniteKernal implements IgniteEx, IgniteMXBean, Externalizable { /** Helper that registers MBeans */ @GridToStringExclude - private final MBeansManager mBeansMgr = new MBeansManager(); + private IgniteMBeansManager mBeansMgr; /** Configuration. */ private IgniteConfiguration cfg; @@ -467,22 +481,22 @@ public IgniteKernal(@Nullable GridSpringResourceContext rsrcCtx) { /** {@inheritDoc} */ @Override public long getLongJVMPausesCount() { - return LongJVMPauseDetector.longPausesCount(); + return longJVMPauseDetector != null ? longJVMPauseDetector.longPausesCount() : 0; } /** {@inheritDoc} */ @Override public long getLongJVMPausesTotalDuration() { - return LongJVMPauseDetector.longPausesTotalDuration(); + return longJVMPauseDetector != null ? longJVMPauseDetector.longPausesTotalDuration() : 0; } /** {@inheritDoc} */ @Override public Map getLongJVMPauseLastEvents() { - return LongJVMPauseDetector.longPauseEvents(); + return longJVMPauseDetector != null ? 
longJVMPauseDetector.longPauseEvents() : Collections.emptyMap(); } /** {@inheritDoc} */ @Override public String getUpTimeFormatted() { - return X.timeSpan2HMSM(U.currentTimeMillis() - startTime); + return X.timeSpan2DHMSM(U.currentTimeMillis() - startTime); } /** {@inheritDoc} */ @@ -497,6 +511,36 @@ public IgniteKernal(@Nullable GridSpringResourceContext rsrcCtx) { return Arrays.toString(cfg.getCheckpointSpi()); } + /** {@inheritDoc} */ + @Override public String getCurrentCoordinatorFormatted() { + ClusterNode node = ctx.discovery().oldestAliveServerNode(AffinityTopologyVersion.NONE); + + if (node == null) + return ""; + + return new StringBuilder() + .append(node.addresses()) + .append(COORDINATOR_PROPERTIES_SEPARATOR) + .append(node.id()) + .append(COORDINATOR_PROPERTIES_SEPARATOR) + .append(node.order()) + .append(COORDINATOR_PROPERTIES_SEPARATOR) + .append(node.hostNames()) + .toString(); + } + + /** {@inheritDoc} */ + @Override public boolean isNodeInBaseline() { + ClusterNode locNode = localNode(); + + if (locNode.isClient() || locNode.isDaemon()) + return false; + + DiscoveryDataClusterState clusterState = ctx.state().clusterState(); + + return clusterState.hasBaselineTopology() && CU.baselineNode(locNode, clusterState); + } + /** {@inheritDoc} */ @Override public String getCommunicationSpiFormatted() { assert cfg != null; @@ -647,7 +691,7 @@ public IgniteKernal(@Nullable GridSpringResourceContext rsrcCtx) { } /** - * @param name New attribute name. + * @param name New attribute name. * @param val New attribute value. * @throws IgniteCheckedException If duplicated SPI name found. 
*/ @@ -706,6 +750,109 @@ private void notifyLifecycleBeansEx(LifecycleEventType evt) { } } + /** */ + private void ackClassPathElementRecursive(File clsPathEntry, SB clsPathContent) { + if (clsPathEntry.isDirectory()) { + String[] list = clsPathEntry.list(); + + for (String listElement : list) + ackClassPathElementRecursive(new File(clsPathEntry, listElement), clsPathContent); + } + else { + String path = clsPathEntry.getAbsolutePath(); + + if (path.endsWith(".class")) + clsPathContent.a(path).a(";"); + } + } + + /** */ + private void ackClassPathEntry(String clsPathEntry, SB clsPathContent) { + File clsPathElementFile = new File(clsPathEntry); + + if (clsPathElementFile.isDirectory()) + ackClassPathElementRecursive(clsPathElementFile, clsPathContent); + else { + String extension = clsPathEntry.length() >= 4 + ? clsPathEntry.substring(clsPathEntry.length() - 4).toLowerCase() + : null; + + if (".jar".equals(extension) || ".zip".equals(extension)) + clsPathContent.a(clsPathEntry).a(";"); + } + } + + /** */ + private void ackClassPathWildCard(String clsPathEntry, SB clsPathContent) { + final int lastSeparatorIdx = clsPathEntry.lastIndexOf(File.separator); + + final int asteriskIdx = clsPathEntry.indexOf('*'); + + //just to log possibly incorrect entries to err + if (asteriskIdx >= 0 && asteriskIdx < lastSeparatorIdx) + throw new RuntimeException("Could not parse classpath entry"); + + final int fileMaskFirstIdx = lastSeparatorIdx + 1; + + final String fileMask = + (fileMaskFirstIdx >= clsPathEntry.length()) ? "*.jar" : clsPathEntry.substring(fileMaskFirstIdx); + + Path path = Paths.get(lastSeparatorIdx > 0 ?
clsPathEntry.substring(0, lastSeparatorIdx) : ".") + .toAbsolutePath() + .normalize(); + + if (lastSeparatorIdx == 0) + path = path.getRoot(); + + try { + DirectoryStream files = + Files.newDirectoryStream(path, fileMask); + + for (Path f : files) { + String s = f.toString(); + + if (s.toLowerCase().endsWith(".jar")) + clsPathContent.a(f.toString()).a(";"); + } + } + catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + /** + * Prints the list of *.jar and *.class files containing in classpath. + */ + private void ackClassPathContent() { + assert log != null; + + boolean enabled = IgniteSystemProperties.getBoolean(IGNITE_LOG_CLASSPATH_CONTENT_ON_STARTUP, true); + + if (enabled) { + String clsPath = System.getProperty("java.class.path", "."); + + String[] clsPathElements = clsPath.split(File.pathSeparator); + + U.log(log, "Classpath value: " + clsPath); + + SB clsPathContent = new SB("List of files containing in classpath: "); + + for (String clsPathEntry : clsPathElements) { + try { + if (clsPathEntry.contains("*")) + ackClassPathWildCard(clsPathEntry, clsPathContent); + else + ackClassPathEntry(clsPathEntry, clsPathContent); + } + catch (Exception e) { + U.warn(log, String.format("Could not log class path entry '%s': %s", clsPathEntry, e.getMessage())); + } + } + + U.log(log, clsPathContent.toString()); + } + } + /** * @param cfg Configuration to use. * @param utilityCachePool Utility cache pool. @@ -724,6 +871,8 @@ private void notifyLifecycleBeansEx(LifecycleEventType evt) { * @param schemaExecSvc Schema executor service. * @param customExecSvcs Custom named executors. * @param errHnd Error handler to use for notification about startup problems. + * @param workerRegistry Worker registry. + * @param hnd Default uncaught exception handler used by thread pools. * @throws IgniteCheckedException Thrown in case of any errors. 
*/ @SuppressWarnings({"CatchGenericClass", "unchecked"}) @@ -745,10 +894,12 @@ public void start( ExecutorService qryExecSvc, ExecutorService schemaExecSvc, @Nullable final Map customExecSvcs, - GridAbsClosure errHnd + GridAbsClosure errHnd, + WorkersRegistry workerRegistry, + Thread.UncaughtExceptionHandler hnd, + TimeBag startTimer ) - throws IgniteCheckedException - { + throws IgniteCheckedException { gw.compareAndSet(null, new GridKernalGatewayImpl(cfg.getIgniteInstanceName())); GridKernalGateway gw = this.gw.get(); @@ -796,6 +947,10 @@ public void start( log = (GridLoggerProxy)cfg.getGridLogger().getLogger( getClass().getName() + (igniteInstanceName != null ? '%' + igniteInstanceName : "")); + longJVMPauseDetector = new LongJVMPauseDetector(log); + + longJVMPauseDetector.start(); + RuntimeMXBean rtBean = ManagementFactory.getRuntimeMXBean(); // Ack various information. @@ -862,9 +1017,16 @@ public void start( schemaExecSvc, customExecSvcs, plugins, - classNameFilter() + MarshallerUtils.classNameFilter(this.getClass().getClassLoader()), + workerRegistry, + hnd, + longJVMPauseDetector ); + startProcessor(new DiagnosticProcessor(ctx)); + + mBeansMgr = new IgniteMBeansManager(this); + cfg.getMarshaller().setContext(ctx.marshallerContext()); GridInternalSubscriptionProcessor subscriptionProc = new GridInternalSubscriptionProcessor(ctx); @@ -924,7 +1086,7 @@ public void start( startProcessor(new GridTimeoutProcessor(ctx)); // Start security processors. - startProcessor(createComponent(GridSecurityProcessor.class, ctx)); + startProcessor(securityProcessor()); // Start SPI managers. // NOTE: that order matters as there are dependencies between managers. 
@@ -951,9 +1113,15 @@ public void start( try { startProcessor(new PdsConsistentIdProcessor(ctx)); startProcessor(createComponent(DiscoveryNodeValidationProcessor.class, ctx)); - startProcessor(new GridAffinityProcessor(ctx)); + startProcessor(new GridAffinityProcessor(ctx)); startProcessor(createComponent(GridSegmentationProcessor.class, ctx)); + + startTimer.finishGlobalStage("Start managers"); + startProcessor(createComponent(IgniteCacheObjectProcessor.class, ctx)); + + startTimer.finishGlobalStage("Configure binary metadata"); + startProcessor(createComponent(IGridClusterStateProcessor.class, ctx)); startProcessor(new IgniteAuthenticationProcessor(ctx)); startProcessor(new GridCacheProcessor(ctx)); @@ -973,11 +1141,15 @@ public void start( startProcessor(createComponent(PlatformProcessor.class, ctx)); startProcessor(new GridMarshallerMappingProcessor(ctx)); + startTimer.finishGlobalStage("Start processors"); + // Start plugins. for (PluginProvider provider : ctx.plugins().allProviders()) { ctx.add(new GridPluginComponent(provider)); provider.start(ctx.plugins().pluginContextForProvider(provider)); + + startTimer.finishGlobalStage("Start '"+ provider.name() + "' plugin"); } // Start platform plugins. @@ -987,6 +1159,12 @@ public void start( ctx.cluster().initDiagnosticListeners(); fillNodeAttributes(clusterProc.updateNotifierEnabled()); + + ctx.cache().context().database().startMemoryRestore(ctx, startTimer); + + ctx.recoveryMode(false); + + startTimer.finishGlobalStage("Finish recovery"); } catch (Throwable e) { U.error( @@ -1010,6 +1188,8 @@ public void start( gw.writeUnlock(); } + startTimer.finishGlobalStage("Join topology"); + // Check whether physical RAM is not exceeded. checkPhysicalRam(); @@ -1026,6 +1206,14 @@ public void start( IgniteInternalFuture transitionWaitFut = joinData.transitionWaitFuture(); + // Notify discovery manager the first to make sure that topology is discovered. + // Active flag is not used in managers, so it is safe to pass true. 
+ ctx.discovery().onKernalStart(true); + + // Notify IO manager the second so further components can send and receive messages. + // Must notify the IO manager before transition state await to make sure IO connection can be established. + ctx.io().onKernalStart(true); + boolean active; if (transitionWaitFut != null) { @@ -1039,11 +1227,7 @@ public void start( else active = joinData.active(); - // Notify discovery manager the first to make sure that topology is discovered. - ctx.discovery().onKernalStart(active); - - // Notify IO manager the second so further components can send and receive messages. - ctx.io().onKernalStart(active); + startTimer.finishGlobalStage("Await transition"); boolean recon = false; @@ -1065,7 +1249,13 @@ public void start( comp.onKernalStart(active); } catch (IgniteNeedReconnectException e) { - assert ctx.discovery().reconnectSupported(); + ClusterNode locNode = ctx.discovery().localNode(); + + assert locNode.isClient(); + + if (!ctx.discovery().reconnectSupported()) + throw new IgniteCheckedException("Client node in forceServerMode " + + "is not allowed to reconnect to the cluster and will be stopped."); if (log.isDebugEnabled()) log.debug("Failed to start node components on node start, will wait for reconnect: " + e); @@ -1085,7 +1275,7 @@ public void start( // Register MBeans. mBeansMgr.registerAllMBeans(utilityCachePool, execSvc, svcExecSvc, sysExecSvc, stripedExecSvc, p2pExecSvc, mgmtExecSvc, igfsExecSvc, dataStreamExecSvc, restExecSvc, affExecSvc, idxExecSvc, callbackExecSvc, - qryExecSvc, schemaExecSvc, customExecSvcs); + qryExecSvc, schemaExecSvc, customExecSvcs, ctx.workersRegistry()); // Lifecycle bean notifications. 
notifyLifecycleBeans(AFTER_NODE_START); @@ -1275,8 +1465,10 @@ private long checkPoolStarvation( }, metricsLogFreq, metricsLogFreq); } - final long longOpDumpTimeout = - IgniteSystemProperties.getLong(IgniteSystemProperties.IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT, 60_000); + final long longOpDumpTimeout = IgniteSystemProperties.getLong( + IgniteSystemProperties.IGNITE_LONG_OPERATIONS_DUMP_TIMEOUT, + DFLT_LONG_OPERATIONS_DUMP_TIMEOUT + ); if (longOpDumpTimeout > 0) { longOpDumpTask = ctx.timeout().schedule(new Runnable() { @@ -1295,10 +1487,27 @@ private long checkPoolStarvation( U.quietAndInfo(log, "To start Console Management & Monitoring run ignitevisorcmd.{sh|bat}"); + if (!IgniteSystemProperties.getBoolean(IgniteSystemProperties.IGNITE_QUIET, true)) + ackClassPathContent(); + ackStart(rtBean); if (!isDaemon()) - ctx.discovery().ackTopology(localNode().order()); + ctx.discovery().ackTopology(ctx.discovery().localJoin().joinTopologyVersion().topologyVersion(), + EventType.EVT_NODE_JOINED, localNode()); + + startTimer.finishGlobalStage("Await exchange"); + } + + /** + * @return GridProcessor that implements {@link IgniteSecurity} + */ + private GridProcessor securityProcessor() throws IgniteCheckedException { + GridSecurityProcessor prc = createComponent(GridSecurityProcessor.class, ctx); + + return prc != null && prc.enabled() + ? 
new IgniteSecurityProcessor(ctx, prc) + : new NoOpIgniteSecurityProcessor(ctx, prc); } /** @@ -1381,7 +1590,9 @@ private HadoopProcessorAdapter createHadoopComponent() throws IgniteCheckedExcep private void validateCommon(IgniteConfiguration cfg) { A.notNull(cfg.getNodeId(), "cfg.getNodeId()"); - A.notNull(cfg.getMBeanServer(), "cfg.getMBeanServer()"); + if (!U.IGNITE_MBEANS_DISABLED) + A.notNull(cfg.getMBeanServer(), "cfg.getMBeanServer()"); + A.notNull(cfg.getGridLogger(), "cfg.getGridLogger()"); A.notNull(cfg.getMarshaller(), "cfg.getMarshaller()"); A.notNull(cfg.getUserAttributes(), "cfg.getUserAttributes()"); @@ -1475,7 +1686,9 @@ private void suggestOptimizations(IgniteConfiguration cfg) { */ @SuppressWarnings({"SuspiciousMethodCalls", "unchecked", "TypeMayBeWeakened"}) private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedException { + ctx.addNodeAttribute(ATTR_REBALANCE_POOL_SIZE, configuration().getRebalanceThreadPoolSize()); ctx.addNodeAttribute(ATTR_DATA_STREAMER_POOL_SIZE, configuration().getDataStreamerThreadPoolSize()); + ctx.addNodeAttribute(ATTR_USE_POOL_FOR_LAZY_QUERIES, IgniteSystemProperties.getBoolean(IGNITE_USE_POOL_FOR_LAZY_QUERIES)); final String[] incProps = cfg.getIncludeProperties(); @@ -1537,12 +1750,17 @@ private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedExcep // Warn about loopback. if (ips.isEmpty() && macs.isEmpty()) U.warn(log, "Ignite is starting on loopback address... Only nodes on the same physical " + - "computer can participate in topology.", - "Ignite is starting on loopback address..."); + "computer can participate in topology."); // Stick in network context into attributes. add(ATTR_IPS, (ips.isEmpty() ? "" : ips)); - add(ATTR_MACS, (macs.isEmpty() ? 
"" : macs)); + + Map userAttrs = configuration().getUserAttributes(); + + if (userAttrs != null && userAttrs.get(IgniteNodeAttributes.ATTR_MACS_OVERRIDE) != null) + add(ATTR_MACS, (Serializable)userAttrs.get(IgniteNodeAttributes.ATTR_MACS_OVERRIDE)); + else + add(ATTR_MACS, (macs.isEmpty() ? "" : macs)); // Stick in some system level attributes add(ATTR_JIT_NAME, U.getCompilerMx() == null ? "" : U.getCompilerMx().getName()); @@ -1618,12 +1836,19 @@ private void fillNodeAttributes(boolean notifyEnabled) throws IgniteCheckedExcep if (cfg.getConnectorConfiguration() != null) add(ATTR_REST_PORT_RANGE, cfg.getConnectorConfiguration().getPortRange()); + // Whether rollback of dynamic cache start is supported or not. + // This property is added because of backward compatibility. + add(ATTR_DYNAMIC_CACHE_START_ROLLBACK_SUPPORTED, Boolean.TRUE); + // Save data storage configuration. addDataStorageConfigurationAttributes(); // Save transactions configuration. add(ATTR_TX_CONFIG, cfg.getTransactionConfiguration()); + // Supported features. + add(ATTR_IGNITE_FEATURES, IgniteFeatures.allFeatures()); + // Stick in SPI versions and classes attributes. addSpiAttributes(cfg.getCollisionSpi()); addSpiAttributes(cfg.getDiscoverySpi()); @@ -1715,104 +1940,6 @@ private void startProcessor(GridProcessor proc) throws IgniteCheckedException { } } - /** - * Returns class name filter for marshaller. - * @return Class name filter for marshaller. - */ - private IgnitePredicate classNameFilter() throws IgniteCheckedException { - ClassSet whiteList = classWhiteList(); - ClassSet blackList = classBlackList(); - - return new IgnitePredicate() { - @Override public boolean apply(String s) { - // Allows all primitive arrays and checks arrays' type. 
- if ((blackList != null || whiteList != null) && s.charAt(0) == '[') { - if (s.charAt(1) == 'L' && s.length() > 2) - s = s.substring(2, s.length() - 1); - else - return true; - } - - return (blackList == null || !blackList.contains(s)) && (whiteList == null || whiteList.contains(s)); - } - }; - } - - /** - * @return White list of classes. - */ - private ClassSet classWhiteList() throws IgniteCheckedException { - ClassSet clsSet = null; - - String fileName = IgniteSystemProperties.getString(IgniteSystemProperties.IGNITE_MARSHALLER_WHITELIST); - - if (fileName != null) { - clsSet = new ClassSet(); - - addClassNames(JDK_CLS_NAMES_FILE, clsSet); - addClassNames(CLS_NAMES_FILE, clsSet); - addClassNames(fileName, clsSet); - } - - return clsSet; - } - - /** - * @return Black list of classes. - */ - private ClassSet classBlackList() throws IgniteCheckedException { - ClassSet clsSet = null; - - String blackListFileName = IgniteSystemProperties.getString(IgniteSystemProperties.IGNITE_MARSHALLER_BLACKLIST); - - if (blackListFileName != null) - addClassNames(blackListFileName, clsSet = new ClassSet()); - - return clsSet; - } - - - /** - * Reads class names from resource referred by given system property name and returns set of classes. - * @param fileName File name containing list of classes. - * @param clsSet Class set for update. - * @return Set of classes. 
- */ - private void addClassNames(String fileName, ClassSet clsSet) throws IgniteCheckedException { - InputStream is = this.getClass().getClassLoader().getResourceAsStream(fileName); - - if (is == null) { - try { - is = new FileInputStream(new File(fileName)); - } - catch (FileNotFoundException e) { - throw new IgniteCheckedException("File " + fileName + " not found."); - } - } - - try (BufferedReader reader = new BufferedReader(new InputStreamReader(is))) { - String line; - - for (int i = 1; (line = reader.readLine()) != null; i++) { - String s = line.trim(); - - if (!s.isEmpty() && s.charAt(0) != '#' && s.charAt(0) != '[') { - try { - clsSet.add(s); - } - catch (IllegalArgumentException e) { - throw new IgniteCheckedException("Exception occurred while reading list of classes" + - "[path=" + fileName + ", row=" + i + ", line=" + s + ']', e); - } - } - } - } - catch (IOException e) { - throw new IgniteCheckedException("Exception occurred while reading and creating list of classes " + - "[path=" + fileName + ']', e); - } - } - /** * Add helper. * @@ -1833,7 +1960,6 @@ private String onOff(boolean b) { } /** - * * @return Whether or not REST is enabled. */ private boolean isRestEnabled() { @@ -1900,7 +2026,6 @@ private void ackConfigUrl() { log.info("Config URL: " + System.getProperty(IGNITE_CONFIG_URL, "n/a")); } - /** * Acks configuration. */ @@ -1918,7 +2043,7 @@ private void ackLogger() { assert log != null; if (log.isInfoEnabled()) - log.info("Logger: " + log.getLoggerInfo() ); + log.info("Logger: " + log.getLoggerInfo()); } /** @@ -2019,6 +2144,11 @@ private void ackStart(RuntimeMXBean rtBean) { log.info(str); } + + if (!ctx.state().clusterState().active()) { + U.quietAndInfo(log, ">>> Ignite cluster is not active (limited functionality available). 
" + + "Use control.(sh|bat) script or IgniteCluster interface to activate."); + } } /** @@ -2186,6 +2316,9 @@ else if (state == STARTING) if (longOpDumpTask != null) longOpDumpTask.close(); + if (longJVMPauseDetector != null) + longJVMPauseDetector.stop(); + boolean interrupted = false; while (true) { @@ -2277,10 +2410,10 @@ else if (state == STARTING) if (!errOnStop) U.quiet(false, "Ignite node stopped OK [" + nodeName + "uptime=" + - X.timeSpan2HMSM(U.currentTimeMillis() - startTime) + ']'); + X.timeSpan2DHMSM(U.currentTimeMillis() - startTime) + ']'); else U.quiet(true, "Ignite node stopped wih ERRORS [" + nodeName + "uptime=" + - X.timeSpan2HMSM(U.currentTimeMillis() - startTime) + ']'); + X.timeSpan2DHMSM(U.currentTimeMillis() - startTime) + ']'); } if (log.isInfoEnabled()) @@ -2295,7 +2428,7 @@ else if (state == STARTING) ">>> " + ack + NL + ">>> " + dash + NL + (igniteInstanceName == null ? "" : ">>> Ignite instance name: " + igniteInstanceName + NL) + - ">>> Grid uptime: " + X.timeSpan2HMSM(U.currentTimeMillis() - startTime) + + ">>> Grid uptime: " + X.timeSpan2DHMSM(U.currentTimeMillis() - startTime) + NL + NL); } @@ -2309,7 +2442,7 @@ else if (state == STARTING) ">>> " + ack + NL + ">>> " + dash + NL + (igniteInstanceName == null ? "" : ">>> Ignite instance name: " + igniteInstanceName + NL) + - ">>> Grid uptime: " + X.timeSpan2HMSM(U.currentTimeMillis() - startTime) + + ">>> Grid uptime: " + X.timeSpan2DHMSM(U.currentTimeMillis() - startTime) + NL + ">>> See log above for detailed error message." + NL + ">>> Note that some errors during stop can prevent grid from" + NL + @@ -2325,6 +2458,8 @@ else if (state == STARTING) // Preserve interrupt status. Thread.currentThread().interrupt(); } + + ctx.ioStats().stop(); } else { // Proper notification. @@ -2342,8 +2477,8 @@ else if (state == STARTING) * USED ONLY FOR TESTING. * * @param name Cache name. - * @param Key type. - * @param Value type. + * @param Key type. + * @param Value type. 
* @return Internal cache instance. */ /*@java.test.only*/ @@ -2407,7 +2542,6 @@ private void ackDaemon() { } /** - * * @return {@code True} is this node is daemon. */ private boolean isDaemon() { @@ -2417,11 +2551,8 @@ private boolean isDaemon() { } /** - * Whether or not remote JMX management is enabled for this node. Remote JMX management is - * enabled when the following system property is set: - *
    - *
  • {@code com.sun.management.jmxremote}
  • - *
+ * Whether or not remote JMX management is enabled for this node. Remote JMX management is enabled when the + * following system property is set:
  • {@code com.sun.management.jmxremote}
* * @return {@code True} if remote JMX management is enabled - {@code false} otherwise. */ @@ -2430,9 +2561,9 @@ private boolean isDaemon() { } /** - * Whether or not node restart is enabled. Node restart us supported when this node was started - * with {@code bin/ignite.{sh|bat}} script using {@code -r} argument. Node can be - * programmatically restarted using {@link Ignition#restart(boolean)}} method. + * Whether or not node restart is enabled. Node restart us supported when this node was started with {@code + * bin/ignite.{sh|bat}} script using {@code -r} argument. Node can be programmatically restarted using {@link + * Ignition#restart(boolean)}} method. * * @return {@code True} if restart mode is enabled, {@code false} otherwise. * @see Ignition#restart(boolean) @@ -2466,19 +2597,41 @@ private void ackSpis() { * */ private void ackRebalanceConfiguration() throws IgniteCheckedException { - if (cfg.getSystemThreadPoolSize() <= cfg.getRebalanceThreadPoolSize()) - throw new IgniteCheckedException("Rebalance thread pool size exceed or equals System thread pool size. " + - "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); + if (cfg.isClientMode()) { + if (cfg.getRebalanceThreadPoolSize() != IgniteConfiguration.DFLT_REBALANCE_THREAD_POOL_SIZE) + U.warn(log, "Setting the rebalance pool size has no effect on the client mode"); + } + else { + if (cfg.getSystemThreadPoolSize() <= cfg.getRebalanceThreadPoolSize()) + throw new IgniteCheckedException("Rebalance thread pool size exceed or equals System thread pool size. " + + "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); - if (cfg.getRebalanceThreadPoolSize() < 1) - throw new IgniteCheckedException("Rebalance thread pool size minimal allowed value is 1. 
" + - "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); + if (cfg.getRebalanceThreadPoolSize() < 1) + throw new IgniteCheckedException("Rebalance thread pool size minimal allowed value is 1. " + + "Change IgniteConfiguration.rebalanceThreadPoolSize property before next start."); - for (CacheConfiguration ccfg : cfg.getCacheConfiguration()) { - if (ccfg.getRebalanceBatchesPrefetchCount() < 1) + if (cfg.getRebalanceBatchesPrefetchCount() < 1) throw new IgniteCheckedException("Rebalance batches prefetch count minimal allowed value is 1. " + - "Change CacheConfiguration.rebalanceBatchesPrefetchCount property before next start. " + - "[cache=" + ccfg.getName() + "]"); + "Change IgniteConfiguration.rebalanceBatchesPrefetchCount property before next start."); + + if (cfg.getRebalanceBatchSize() <= 0) + throw new IgniteCheckedException("Rebalance batch size must be greater than zero. " + + "Change IgniteConfiguration.rebalanceBatchSize property before next start."); + + if (cfg.getRebalanceThrottle() < 0) + throw new IgniteCheckedException("Rebalance throttle can't have negative value. " + + "Change IgniteConfiguration.rebalanceThrottle property before next start."); + + if (cfg.getRebalanceTimeout() < 0) + throw new IgniteCheckedException("Rebalance message timeout can't have negative value. " + + "Change IgniteConfiguration.rebalanceTimeout property before next start."); + + for (CacheConfiguration ccfg : cfg.getCacheConfiguration()) { + if (ccfg.getRebalanceBatchesPrefetchCount() < 1) + throw new IgniteCheckedException("Rebalance batches prefetch count minimal allowed value is 1. " + + "Change CacheConfiguration.rebalanceBatchesPrefetchCount property before next start. 
" + + "[cache=" + ccfg.getName() + "]"); + } } } @@ -2550,9 +2703,7 @@ private void ackP2pConfiguration() { U.warn( log, "Peer class loading is enabled (disable it in production for performance and " + - "deployment consistency reasons)", - "Peer class loading is enabled (disable it for better performance)" - ); + "deployment consistency reasons)"); } /** @@ -2623,6 +2774,9 @@ private Iterable lifecycleAwares(IgniteConfiguration cfg) { objs.add(cfg.getGridLogger()); objs.add(cfg.getMBeanServer()); + if (cfg.getCommunicationFailureResolver() != null) + objs.add(cfg.getCommunicationFailureResolver()); + return objs; } @@ -2806,7 +2960,6 @@ public IgniteInternalCache getCache(String name) { } } - /** {@inheritDoc} */ @Override public Collection createCaches(Collection cacheCfgs) { A.notNull(cacheCfgs, "cacheCfgs"); @@ -2864,11 +3017,13 @@ public IgniteInternalCache getCache(String name) { } /** {@inheritDoc} */ - @SuppressWarnings("unchecked") @Override public IgniteBiTuple, Boolean> getOrCreateCache0( CacheConfiguration cacheCfg, boolean sql) { A.notNull(cacheCfg, "cacheCfg"); - CU.validateCacheName(cacheCfg.getName()); + + String cacheName = cacheCfg.getName(); + + CU.validateCacheName(cacheName); guard(); @@ -2877,18 +3032,22 @@ public IgniteInternalCache getCache(String name) { Boolean res = false; - if (ctx.cache().cache(cacheCfg.getName()) == null) { + IgniteCacheProxy cache = ctx.cache().publicJCache(cacheName, false, true); + + if (cache == null) { res = sql ? 
ctx.cache().dynamicStartSqlCache(cacheCfg).get() : - ctx.cache().dynamicStartCache(cacheCfg, - cacheCfg.getName(), - null, - false, - true, - true).get(); + ctx.cache().dynamicStartCache(cacheCfg, + cacheName, + null, + false, + true, + true).get(); + + return new IgniteBiTuple<>(ctx.cache().publicJCache(cacheName), res); } - - return new IgniteBiTuple<>((IgniteCache)ctx.cache().publicJCache(cacheCfg.getName()), res); + else + return new IgniteBiTuple<>(cache, res); } catch (IgniteCheckedException e) { throw CU.convertToCacheException(e); @@ -3136,7 +3295,7 @@ public IgniteInternalFuture destroyCacheAsync(String cacheName, boolean try { checkClusterState(); - return ctx.cache().dynamicDestroyCache(cacheName, sql, checkThreadTx, false); + return ctx.cache().dynamicDestroyCache(cacheName, sql, checkThreadTx, false, null); } finally { unguard(); @@ -3156,7 +3315,7 @@ public IgniteInternalFuture destroyCachesAsync(Collection cacheNames, try { checkClusterState(); - return ctx.cache().dynamicDestroyCaches(cacheNames, checkThreadTx, false); + return ctx.cache().dynamicDestroyCaches(cacheNames, checkThreadTx); } finally { unguard(); @@ -3172,10 +3331,15 @@ public IgniteInternalFuture destroyCachesAsync(Collection cacheNames, try { checkClusterState(); - if (ctx.cache().cache(cacheName) == null) + IgniteCacheProxy cache = ctx.cache().publicJCache(cacheName, false, true); + + if (cache == null) { ctx.cache().getOrCreateFromTemplate(cacheName, true).get(); - return ctx.cache().publicJCache(cacheName); + return ctx.cache().publicJCache(cacheName); + } + + return cache; } catch (IgniteCheckedException e) { throw CU.convertToCacheException(e); @@ -3439,6 +3603,7 @@ public IgniteInternalFuture getOrCreateCacheAsync(String cacheName, String te Ignition.stop(igniteInstanceName, true); } + /** {@inheritDoc} */ @Override public Affinity affinity(String cacheName) { CU.validateCacheName(cacheName); checkClusterState(); @@ -3890,12 +4055,17 @@ public void onReconnected(final boolean 
clusterRestarted) { } catch (IgniteCheckedException e) { if (!X.hasCause(e, IgniteNeedReconnectException.class, - IgniteClientDisconnectedCheckedException.class)) { + IgniteClientDisconnectedCheckedException.class, + IgniteInterruptedCheckedException.class)) { U.error(log, "Failed to reconnect, will stop node.", e); reconnectState.firstReconnectFut.onDone(e); - close(); + new Thread(() -> { + U.error(log, "Stopping the node after a failed reconnect attempt."); + + close(); + }, "node-stopper").start(); } else { assert ctx.discovery().reconnectSupported(); @@ -3923,7 +4093,8 @@ public void onReconnected(final boolean clusterRestarted) { if (err != null) { U.error(log, "Failed to reconnect, will stop node", err); - close(); + if (!X.hasCause(err, NodeStoppingException.class)) + close(); } } @@ -3953,6 +4124,9 @@ private static T createComponent(Class cls, GridKer if (cls.equals(IGridClusterStateProcessor.class)) return (T)new GridClusterStateProcessor(ctx); + if(cls.equals(GridSecurityProcessor.class)) + return null; + Class implCls = null; try { @@ -4016,7 +4190,6 @@ private static String componentClassName(Class cls) { /** * @return IgniteKernal instance. - * * @throws ObjectStreamException If failed. */ protected Object readResolve() throws ObjectStreamException { @@ -4128,179 +4301,6 @@ void waitPreviousReconnect() { } } - /** - * Class that registers and unregisters MBeans for kernal. - */ - private class MBeansManager { - /** MBean names stored to be unregistered later. */ - private final Set mBeanNames = new HashSet<>(); - - /** - * Registers all kernal MBeans (for kernal, metrics, thread pools). 
- * - * @param utilityCachePool Utility cache pool - * @param execSvc Executor service - * @param sysExecSvc System executor service - * @param stripedExecSvc Striped executor - * @param p2pExecSvc P2P executor service - * @param mgmtExecSvc Management executor service - * @param igfsExecSvc IGFS executor service - * @param dataStreamExecSvc data stream executor service - * @param restExecSvc Reset executor service - * @param affExecSvc Affinity executor service - * @param idxExecSvc Indexing executor service - * @param callbackExecSvc Callback executor service - * @param qryExecSvc Query executor service - * @param schemaExecSvc Schema executor service - * @param customExecSvcs Custom named executors - * - * @throws IgniteCheckedException if fails to register any of the MBeans - */ - private void registerAllMBeans( - ExecutorService utilityCachePool, - final ExecutorService execSvc, - final ExecutorService svcExecSvc, - final ExecutorService sysExecSvc, - final StripedExecutor stripedExecSvc, - ExecutorService p2pExecSvc, - ExecutorService mgmtExecSvc, - ExecutorService igfsExecSvc, - StripedExecutor dataStreamExecSvc, - ExecutorService restExecSvc, - ExecutorService affExecSvc, - @Nullable ExecutorService idxExecSvc, - IgniteStripedThreadPoolExecutor callbackExecSvc, - ExecutorService qryExecSvc, - ExecutorService schemaExecSvc, - @Nullable final Map customExecSvcs - ) throws IgniteCheckedException { - if (U.IGNITE_MBEANS_DISABLED) - return; - - // Kernal - registerMBean("Kernal", IgniteKernal.class.getSimpleName(), IgniteKernal.this, IgniteMXBean.class); - - // Metrics - ClusterMetricsMXBean locMetricsBean = new ClusterLocalNodeMetricsMXBeanImpl(ctx.discovery()); - registerMBean("Kernal", locMetricsBean.getClass().getSimpleName(), locMetricsBean, ClusterMetricsMXBean.class); - ClusterMetricsMXBean metricsBean = new ClusterMetricsMXBeanImpl(cluster()); - registerMBean("Kernal", metricsBean.getClass().getSimpleName(), metricsBean, ClusterMetricsMXBean.class); - - 
// Executors - registerExecutorMBean("GridUtilityCacheExecutor", utilityCachePool); - registerExecutorMBean("GridExecutionExecutor", execSvc); - registerExecutorMBean("GridServicesExecutor", svcExecSvc); - registerExecutorMBean("GridSystemExecutor", sysExecSvc); - registerExecutorMBean("GridClassLoadingExecutor", p2pExecSvc); - registerExecutorMBean("GridManagementExecutor", mgmtExecSvc); - registerExecutorMBean("GridIgfsExecutor", igfsExecSvc); - registerExecutorMBean("GridDataStreamExecutor", dataStreamExecSvc); - registerExecutorMBean("GridAffinityExecutor", affExecSvc); - registerExecutorMBean("GridCallbackExecutor", callbackExecSvc); - registerExecutorMBean("GridQueryExecutor", qryExecSvc); - registerExecutorMBean("GridSchemaExecutor", schemaExecSvc); - - if (idxExecSvc != null) - registerExecutorMBean("GridIndexingExecutor", idxExecSvc); - - if (cfg.getConnectorConfiguration() != null) - registerExecutorMBean("GridRestExecutor", restExecSvc); - - if (stripedExecSvc != null) { - // striped executor uses a custom adapter - registerMBean("Thread Pools", - "StripedExecutor", - new StripedExecutorMXBeanAdapter(stripedExecSvc), - StripedExecutorMXBean.class); - } - - if (customExecSvcs != null) { - for (Map.Entry entry : customExecSvcs.entrySet()) - registerExecutorMBean(entry.getKey(), entry.getValue()); - } - } - - /** - * Registers a {@link ThreadPoolMXBean} for an executor. - * - * @param name name of the bean to register - * @param exec executor to register a bean for - * - * @throws IgniteCheckedException if registration fails. - */ - private void registerExecutorMBean(String name, ExecutorService exec) throws IgniteCheckedException { - registerMBean("Thread Pools", name, new ThreadPoolMXBeanAdapter(exec), ThreadPoolMXBean.class); - } - - /** - * Register an Ignite MBean. 
- * - * @param grp bean group name - * @param name bean name - * @param impl bean implementation - * @param itf bean interface - * @param bean type - * - * @throws IgniteCheckedException if registration fails - */ - private void registerMBean(String grp, String name, T impl, Class itf) throws IgniteCheckedException { - assert !U.IGNITE_MBEANS_DISABLED; - - try { - ObjectName objName = U.registerMBean( - cfg.getMBeanServer(), - cfg.getIgniteInstanceName(), - grp, name, impl, itf); - - if (log.isDebugEnabled()) - log.debug("Registered MBean: " + objName); - - mBeanNames.add(objName); - } - catch (JMException e) { - throw new IgniteCheckedException("Failed to register MBean " + name, e); - } - } - - /** - * Unregisters all previously registered MBeans. - * - * @return {@code true} if all mbeans were unregistered successfully; {@code false} otherwise. - */ - private boolean unregisterAllMBeans() { - boolean success = true; - - for (ObjectName name : mBeanNames) - success = success && unregisterMBean(name); - - return success; - } - - /** - * Unregisters given MBean. - * - * @param mbean MBean to unregister. - * @return {@code true} if successfully unregistered, {@code false} otherwise. 
- */ - private boolean unregisterMBean(ObjectName mbean) { - assert !U.IGNITE_MBEANS_DISABLED; - - try { - cfg.getMBeanServer().unregisterMBean(mbean); - - if (log.isDebugEnabled()) - log.debug("Unregistered MBean: " + mbean); - - return true; - } - catch (JMException e) { - U.error(log, "Failed to unregister MBean.", e); - - return false; - } - } - } - /** {@inheritDoc} */ @Override public void runIoTest( long warmup, @@ -4315,6 +4315,11 @@ private boolean unregisterMBean(ObjectName mbean) { new ArrayList(ctx.cluster().get().forServers().forRemotes().nodes())); } + /** {@inheritDoc} */ + @Override public void clearNodeLocalMap() { + ctx.cluster().get().clearNodeMap(); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(IgniteKernal.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java index 4c23dd5a24397..8d992a870ade1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteMessagingImpl.java @@ -241,6 +241,9 @@ private void send0(@Nullable Object topic, Collection msgs, boolean async) th false, prj.predicate())); } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } finally { unguard(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNeedReconnectException.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNeedReconnectException.java index f3849500d3561..c26f4da03107a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNeedReconnectException.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNeedReconnectException.java @@ -29,12 +29,10 @@ public class IgniteNeedReconnectException extends IgniteCheckedException { private static final long serialVersionUID = 0L; /** - * @param locNode Local node. 
+ * @param node Node that should reconnect. * @param cause Cause. */ - public IgniteNeedReconnectException(ClusterNode locNode, @Nullable Throwable cause) { - super("Local node need try to reconnect [locNodeId=" + locNode.id() + ']', cause); - - assert locNode.isClient() : locNode; + public IgniteNeedReconnectException(ClusterNode node, @Nullable Throwable cause) { + super("Node need try to reconnect [nodeId=" + node.id() + ']', cause); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java index 073369fdd2efb..802e6262cf0e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteNodeAttributes.java @@ -120,6 +120,9 @@ public final class IgniteNodeAttributes { /** Internal attribute name constant. */ public static final String ATTR_MACS = ATTR_PREFIX + ".macs"; + /** Allows to override {@link #ATTR_MACS} by adding this attribute in the user attributes. */ + public static final String ATTR_MACS_OVERRIDE = "override." + ATTR_MACS; + /** Internal attribute name constant. */ public static final String ATTR_PHY_RAM = ATTR_PREFIX + ".phy.ram"; @@ -196,6 +199,20 @@ public final class IgniteNodeAttributes { /** User authentication enabled flag. */ public static final String ATTR_AUTHENTICATION_ENABLED = ATTR_PREFIX + ".authentication.enabled"; + /** Rebalance thread pool size. */ + public static final String ATTR_REBALANCE_POOL_SIZE = ATTR_PREFIX + ".rebalance.pool.size"; + + /** Internal attribute name constant. */ + public static final String ATTR_DYNAMIC_CACHE_START_ROLLBACK_SUPPORTED = ATTR_PREFIX + ".dynamic.cache.start.rollback.supported"; + + /** Internal attribute indicates that incoming cache requests should be validated on primary node as well. 
*/ + public static final String ATTR_VALIDATE_CACHE_REQUESTS = ATTR_CACHE + ".validate.cache.requests"; + + /** Supported features. */ + public static final String ATTR_IGNITE_FEATURES = ATTR_PREFIX + ".features"; + + /** */ + public static final String ATTR_USE_POOL_FOR_LAZY_QUERIES = ATTR_PREFIX + ".query.lazy.usepool"; /** * Enforces singleton. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgniteVersionUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/IgniteVersionUtils.java index 8a459522e70b2..cd17078f58b6c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgniteVersionUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgniteVersionUtils.java @@ -19,6 +19,7 @@ import java.text.SimpleDateFormat; import java.util.Date; +import java.util.TimeZone; import org.apache.ignite.lang.IgniteProductVersion; /** @@ -31,12 +32,18 @@ public class IgniteVersionUtils { /** Ignite version. */ public static final IgniteProductVersion VER; + /** UTC build date formatter. */ + private static final SimpleDateFormat BUILD_TSTAMP_DATE_FORMATTER; + /** Formatted build date. */ public static final String BUILD_TSTAMP_STR; /** Build timestamp in seconds. */ public static final long BUILD_TSTAMP; + /** Build timestamp string property value. */ + private static final String BUILD_TSTAMP_FROM_PROPERTY; + /** Revision hash. */ public static final String REV_HASH_STR; @@ -47,7 +54,7 @@ public class IgniteVersionUtils { public static final String ACK_VER_STR; /** Copyright blurb. */ - public static final String COPYRIGHT = "2018 Copyright(C) Apache Software Foundation"; + public static final String COPYRIGHT; /** * Static initializer. 
@@ -58,10 +65,22 @@ public class IgniteVersionUtils { .replace(".b", "-b") .replace(".final", "-final"); - BUILD_TSTAMP = Long.valueOf(IgniteProperties.get("ignite.build")); - BUILD_TSTAMP_STR = new SimpleDateFormat("yyyyMMdd").format(new Date(BUILD_TSTAMP * 1000)); + BUILD_TSTAMP_FROM_PROPERTY = IgniteProperties.get("ignite.build"); + + //Development ignite.properties file contains ignite.build = 0, so we will add the check for it. + BUILD_TSTAMP = !BUILD_TSTAMP_FROM_PROPERTY.isEmpty() && Long.parseLong(BUILD_TSTAMP_FROM_PROPERTY) != 0 + ? Long.parseLong(BUILD_TSTAMP_FROM_PROPERTY) : System.currentTimeMillis() / 1000; + + BUILD_TSTAMP_DATE_FORMATTER = new SimpleDateFormat("yyyyMMdd"); + + BUILD_TSTAMP_DATE_FORMATTER.setTimeZone(TimeZone.getTimeZone("UTC")); + + BUILD_TSTAMP_STR = formatBuildTimeStamp(BUILD_TSTAMP * 1000); + + COPYRIGHT = BUILD_TSTAMP_STR.substring(0, 4) + " Copyright(C) Apache Software Foundation"; REV_HASH_STR = IgniteProperties.get("ignite.revision"); + RELEASE_DATE_STR = IgniteProperties.get("ignite.rel.date"); String rev = REV_HASH_STR.length() > 8 ? REV_HASH_STR.substring(0, 8) : REV_HASH_STR; @@ -71,6 +90,17 @@ public class IgniteVersionUtils { VER = IgniteProductVersion.fromString(VER_STR + '-' + BUILD_TSTAMP + '-' + REV_HASH_STR); } + /** + * Builds string date representation in "yyyyMMdd" format. + * "synchronized" because it uses {@link SimpleDateFormat} which is not threadsafe. + * + * @param ts Timestamp. + * @return Timestamp date in UTC timezone. + */ + public static synchronized String formatBuildTimeStamp(long ts) { + return BUILD_TSTAMP_DATE_FORMATTER.format(new Date(ts)); + } + /** * Private constructor. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java index 8073faab8c8fa..e4cb2948737d0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/IgnitionEx.java @@ -17,9 +17,13 @@ package org.apache.ignite.internal; +import javax.management.JMException; +import javax.management.MBeanServer; +import javax.management.ObjectName; import java.io.File; import java.io.IOException; import java.io.InputStream; +import java.lang.Thread.UncaughtExceptionHandler; import java.lang.management.ManagementFactory; import java.lang.reflect.Constructor; import java.net.MalformedURLException; @@ -45,9 +49,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Handler; -import javax.management.JMException; -import javax.management.MBeanServer; -import javax.management.ObjectName; import org.apache.ignite.Ignite; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; @@ -75,6 +76,7 @@ import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.binary.BinaryMarshaller; import org.apache.ignite.internal.managers.communication.GridIoPolicy; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.processors.datastructures.DataStructuresProcessor; import org.apache.ignite.internal.processors.igfs.IgfsThreadFactory; import org.apache.ignite.internal.processors.igfs.IgfsUtils; @@ -82,15 +84,22 @@ import org.apache.ignite.internal.util.GridConcurrentHashSet; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.TimeBag; import org.apache.ignite.internal.util.spring.IgniteSpringHelper; import org.apache.ignite.internal.util.typedef.CA; import 
org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.G; import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.util.worker.GridWorker; +import org.apache.ignite.internal.worker.WorkersRegistry; +import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.logger.LoggerNodeIdAware; import org.apache.ignite.logger.java.JavaLogger; import org.apache.ignite.marshaller.Marshaller; @@ -117,6 +126,7 @@ import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.Nullable; +import static java.util.stream.Collectors.joining; import static org.apache.ignite.IgniteState.STARTED; import static org.apache.ignite.IgniteState.STOPPED; import static org.apache.ignite.IgniteState.STOPPED_ON_FAILURE; @@ -129,6 +139,8 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_NO_SHUTDOWN_HOOK; import static org.apache.ignite.IgniteSystemProperties.IGNITE_RESTART_CODE; import static org.apache.ignite.IgniteSystemProperties.IGNITE_SUCCESS_FILE; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT; +import static org.apache.ignite.IgniteSystemProperties.getLong; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; import static org.apache.ignite.cache.CacheMode.REPLICATED; import static org.apache.ignite.cache.CacheRebalanceMode.SYNC; @@ -136,6 +148,7 @@ import static org.apache.ignite.configuration.IgniteConfiguration.DFLT_THREAD_KEEP_ALIVE_TIME; import static 
org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEMORY_POLICY_MAX_SIZE; import static org.apache.ignite.configuration.MemoryConfiguration.DFLT_MEM_PLC_DEFAULT_NAME; +import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; import static org.apache.ignite.internal.IgniteComponentType.SPRING; import static org.apache.ignite.plugin.segmentation.SegmentationPolicy.RESTART_JVM; @@ -410,7 +423,7 @@ public void run() { " milliseconds. Killing node..."); // We are not able to kill only one grid so whole JVM will be stopped. - System.exit(Ignition.KILL_EXIT_CODE); + Runtime.getRuntime().halt(Ignition.KILL_EXIT_CODE); } } }, timeoutMs, TimeUnit.MILLISECONDS); @@ -1716,7 +1729,17 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException try { starterThread = Thread.currentThread(); - start0(startCtx); + IgniteConfiguration myCfg = initializeConfiguration( + startCtx.config() != null ? startCtx.config() : new IgniteConfiguration() + ); + + TimeBag startNodeTimer = new TimeBag(TimeUnit.MILLISECONDS); + + start0(startCtx, myCfg, startNodeTimer); + + if (log.isInfoEnabled()) + log.info("Node started : " + + startNodeTimer.stagesTimings().stream().collect(joining(",", "[", "]"))); } catch (Exception e) { if (log != null) @@ -1737,31 +1760,35 @@ synchronized void start(GridStartContext startCtx) throws IgniteCheckedException * @throws IgniteCheckedException If start failed. */ @SuppressWarnings({"unchecked", "TooBroadScope"}) - private void start0(GridStartContext startCtx) throws IgniteCheckedException { + private void start0(GridStartContext startCtx, IgniteConfiguration cfg, TimeBag startTimer) + throws IgniteCheckedException { assert grid == null : "Grid is already started: " + name; - IgniteConfiguration cfg = startCtx.config() != null ? startCtx.config() : new IgniteConfiguration(); - - IgniteConfiguration myCfg = initializeConfiguration(cfg); - // Set configuration URL, if any, into system property. 
if (startCtx.configUrl() != null) System.setProperty(IGNITE_CONFIG_URL, startCtx.configUrl().toString()); // Ensure that SPIs support multiple grid instances, if required. if (!startCtx.single()) { - ensureMultiInstanceSupport(myCfg.getDeploymentSpi()); - ensureMultiInstanceSupport(myCfg.getCommunicationSpi()); - ensureMultiInstanceSupport(myCfg.getDiscoverySpi()); - ensureMultiInstanceSupport(myCfg.getCheckpointSpi()); - ensureMultiInstanceSupport(myCfg.getEventStorageSpi()); - ensureMultiInstanceSupport(myCfg.getCollisionSpi()); - ensureMultiInstanceSupport(myCfg.getFailoverSpi()); - ensureMultiInstanceSupport(myCfg.getLoadBalancingSpi()); + ensureMultiInstanceSupport(cfg.getDeploymentSpi()); + ensureMultiInstanceSupport(cfg.getCommunicationSpi()); + ensureMultiInstanceSupport(cfg.getDiscoverySpi()); + ensureMultiInstanceSupport(cfg.getCheckpointSpi()); + ensureMultiInstanceSupport(cfg.getEventStorageSpi()); + ensureMultiInstanceSupport(cfg.getCollisionSpi()); + ensureMultiInstanceSupport(cfg.getFailoverSpi()); + ensureMultiInstanceSupport(cfg.getLoadBalancingSpi()); } validateThreadPoolSize(cfg.getPublicThreadPoolSize(), "public"); + UncaughtExceptionHandler oomeHnd = new UncaughtExceptionHandler() { + @Override public void uncaughtException(Thread t, Throwable e) { + if (grid != null && X.hasCause(e, OutOfMemoryError.class)) + grid.context().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + } + }; + execSvc = new IgniteThreadPoolExecutor( "pub", cfg.getIgniteInstanceName(), @@ -1769,7 +1796,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getPublicThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue(), - GridIoPolicy.PUBLIC_POOL); + GridIoPolicy.PUBLIC_POOL, + oomeHnd); execSvc.allowCoreThreadTimeOut(true); @@ -1782,7 +1810,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getServiceThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new 
LinkedBlockingQueue(), - GridIoPolicy.SERVICE_POOL); + GridIoPolicy.SERVICE_POOL, + oomeHnd); svcExecSvc.allowCoreThreadTimeOut(true); @@ -1795,17 +1824,38 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getSystemThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue(), - GridIoPolicy.SYSTEM_POOL); + GridIoPolicy.SYSTEM_POOL, + oomeHnd); sysExecSvc.allowCoreThreadTimeOut(true); validateThreadPoolSize(cfg.getStripedPoolSize(), "stripedPool"); + WorkersRegistry workerRegistry = new WorkersRegistry( + new IgniteBiInClosure() { + @Override public void apply(GridWorker deadWorker, FailureType failureType) { + if (grid != null) + grid.context().failure().process(new FailureContext( + failureType, + new IgniteException(S.toString(GridWorker.class, deadWorker)))); + } + }, + getLong(IGNITE_SYSTEM_WORKER_BLOCKED_TIMEOUT, cfg.getSystemWorkerBlockedTimeout()), + log + ); + stripedExecSvc = new StripedExecutor( cfg.getStripedPoolSize(), cfg.getIgniteInstanceName(), "sys", - log); + log, + new IgniteInClosure() { + @Override public void apply(Throwable t) { + if (grid != null) + grid.context().failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, t)); + } + }, + workerRegistry); // Note that since we use 'LinkedBlockingQueue', number of // maximum threads has no effect. 
@@ -1820,7 +1870,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getManagementThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue(), - GridIoPolicy.MANAGEMENT_POOL); + GridIoPolicy.MANAGEMENT_POOL, + oomeHnd); mgmtExecSvc.allowCoreThreadTimeOut(true); @@ -1836,7 +1887,8 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getPeerClassLoadingThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, new LinkedBlockingQueue(), - GridIoPolicy.P2P_POOL); + GridIoPolicy.P2P_POOL, + oomeHnd); p2pExecSvc.allowCoreThreadTimeOut(true); @@ -1845,7 +1897,14 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getIgniteInstanceName(), "data-streamer", log, - true); + new IgniteInClosure() { + @Override public void apply(Throwable t) { + if (grid != null) + grid.context().failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, t)); + } + }, + true, + workerRegistry); // Note that we do not pre-start threads here as igfs pool may not be needed. 
validateThreadPoolSize(cfg.getIgfsThreadPoolSize(), "IGFS"); @@ -1854,7 +1913,7 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getIgfsThreadPoolSize(), cfg.getIgfsThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue(), + new LinkedBlockingQueue<>(), new IgfsThreadFactory(cfg.getIgniteInstanceName(), "igfs")); igfsExecSvc.allowCoreThreadTimeOut(true); @@ -1865,33 +1924,37 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { callbackExecSvc = new IgniteStripedThreadPoolExecutor( cfg.getAsyncCallbackPoolSize(), cfg.getIgniteInstanceName(), - "callback"); + "callback", + oomeHnd); - if (myCfg.getConnectorConfiguration() != null) { - validateThreadPoolSize(myCfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); + if (cfg.getConnectorConfiguration() != null) { + validateThreadPoolSize(cfg.getConnectorConfiguration().getThreadPoolSize(), "connector"); restExecSvc = new IgniteThreadPoolExecutor( "rest", - myCfg.getIgniteInstanceName(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), - myCfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getIgniteInstanceName(), + cfg.getConnectorConfiguration().getThreadPoolSize(), + cfg.getConnectorConfiguration().getThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue() + new LinkedBlockingQueue<>(), + GridIoPolicy.UNDEFINED, + oomeHnd ); restExecSvc.allowCoreThreadTimeOut(true); } - validateThreadPoolSize(myCfg.getUtilityCacheThreadPoolSize(), "utility cache"); + validateThreadPoolSize(cfg.getUtilityCacheThreadPoolSize(), "utility cache"); utilityCacheExecSvc = new IgniteThreadPoolExecutor( "utility", cfg.getIgniteInstanceName(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheThreadPoolSize(), - myCfg.getUtilityCacheKeepAliveTime(), - new LinkedBlockingQueue(), - GridIoPolicy.UTILITY_CACHE_POOL); + cfg.getUtilityCacheThreadPoolSize(), + cfg.getUtilityCacheThreadPoolSize(), + 
cfg.getUtilityCacheKeepAliveTime(), + new LinkedBlockingQueue<>(), + GridIoPolicy.UTILITY_CACHE_POOL, + oomeHnd); utilityCacheExecSvc.allowCoreThreadTimeOut(true); @@ -1901,8 +1964,9 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { 1, 1, DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue(), - GridIoPolicy.AFFINITY_POOL); + new LinkedBlockingQueue<>(), + GridIoPolicy.AFFINITY_POOL, + oomeHnd); affExecSvc.allowCoreThreadTimeOut(true); @@ -1915,8 +1979,9 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cpus, cpus * 2, 3000L, - new LinkedBlockingQueue(1000), - GridIoPolicy.IDX_POOL + new LinkedBlockingQueue<>(1000), + GridIoPolicy.IDX_POOL, + oomeHnd ); } @@ -1928,8 +1993,9 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { cfg.getQueryThreadPoolSize(), cfg.getQueryThreadPoolSize(), DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue(), - GridIoPolicy.QUERY_POOL); + new LinkedBlockingQueue<>(), + GridIoPolicy.QUERY_POOL, + oomeHnd); qryExecSvc.allowCoreThreadTimeOut(true); @@ -1939,8 +2005,9 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { 2, 2, DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue(), - GridIoPolicy.SCHEMA_POOL); + new LinkedBlockingQueue<>(), + GridIoPolicy.SCHEMA_POOL, + oomeHnd); schemaExecSvc.allowCoreThreadTimeOut(true); @@ -1956,14 +2023,16 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { execCfg.getSize(), execCfg.getSize(), DFLT_THREAD_KEEP_ALIVE_TIME, - new LinkedBlockingQueue()); + new LinkedBlockingQueue<>(), + GridIoPolicy.UNDEFINED, + oomeHnd); customExecSvcs.put(execCfg.getName(), exec); } } // Register Ignite MBean for current grid instance. 
- registerFactoryMbean(myCfg.getMBeanServer()); + registerFactoryMbean(cfg.getMBeanServer()); boolean started = false; @@ -1973,8 +2042,10 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { // Init here to make grid available to lifecycle listeners. grid = grid0; + startTimer.finishGlobalStage("Configure system pool"); + grid0.start( - myCfg, + cfg, utilityCacheExecSvc, execSvc, svcExecSvc, @@ -1995,7 +2066,10 @@ private void start0(GridStartContext startCtx) throws IgniteCheckedException { @Override public void apply() { startLatch.countDown(); } - } + }, + workerRegistry, + oomeHnd, + startTimer ); state = STARTED; @@ -2108,8 +2182,10 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) // If user provided IGNITE_HOME - set it as a system property. U.setIgniteHome(ggHome); + String userProvidedWorkDir = cfg.getWorkDirectory(); + // Correctly resolve work directory and set it back to configuration. - String workDir = U.workDirectory(cfg.getWorkDirectory(), ggHome); + String workDir = U.workDirectory(userProvidedWorkDir, ggHome); myCfg.setWorkDirectory(workDir); @@ -2133,6 +2209,9 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) myCfg.setGridLogger(cfgLog); + if(F.isEmpty(userProvidedWorkDir) && F.isEmpty(U.IGNITE_WORK_DIR)) + log.warning("Ignite work directory is not provided, automatically resolved to: " + workDir); + // Check Ignite home folder (after log is available). if (ggHome != null) { File ggHomeFile = new File(ggHome); @@ -2214,8 +2293,7 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) "(only recent 1.6 and 1.7 versions HotSpot VMs are supported). " + "To enable fast marshalling upgrade to recent 1.6 or 1.7 HotSpot VM release. 
" + "Switching to standard JDK marshalling - " + - "object serialization performance will be significantly slower.", - "To enable fast marshalling upgrade to recent 1.6 or 1.7 HotSpot VM release."); + "object serialization performance will be significantly slower."); marsh = new JdkMarshaller(); } @@ -2243,6 +2321,8 @@ private IgniteConfiguration initializeConfiguration(IgniteConfiguration cfg) initializeDefaultSpi(myCfg); + GridDiscoveryManager.initCommunicationErrorResolveConfiguration(myCfg); + initializeDefaultCacheConfiguration(myCfg); ExecutorConfiguration[] execCfgs = myCfg.getExecutorConfiguration(); @@ -2557,15 +2637,12 @@ private synchronized void stop0(boolean cancel) { throw e; } finally { - if (!grid0.context().invalid()) + if (grid0.context().segmented()) + state = STOPPED_ON_SEGMENTATION; + else if (grid0.context().invalid()) + state = STOPPED_ON_FAILURE; + else state = STOPPED; - else { - FailureContext failure = grid0.context().failure().failureContext(); - - state = failure.type() == FailureType.SEGMENTATION ? - STOPPED_ON_SEGMENTATION : - STOPPED_ON_FAILURE; - } grid = null; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/InvalidEnvironmentException.java b/modules/core/src/main/java/org/apache/ignite/internal/InvalidEnvironmentException.java new file mode 100644 index 0000000000000..d45a443e8afd8 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/InvalidEnvironmentException.java @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +/** + * Marker interface of invalid environment exception. + */ +public interface InvalidEnvironmentException { + +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/LongJVMPauseDetector.java b/modules/core/src/main/java/org/apache/ignite/internal/LongJVMPauseDetector.java index c10b6f975460c..c7d21e6ad2ab5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/LongJVMPauseDetector.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/LongJVMPauseDetector.java @@ -21,7 +21,9 @@ import java.util.TreeMap; import java.util.concurrent.atomic.AtomicReference; import org.apache.ignite.IgniteLogger; -import org.apache.ignite.logger.java.JavaLogger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.lang.IgniteBiTuple; +import org.jetbrains.annotations.Nullable; import static org.apache.ignite.IgniteSystemProperties.IGNITE_JVM_PAUSE_DETECTOR_DISABLED; import static org.apache.ignite.IgniteSystemProperties.IGNITE_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT; @@ -39,65 +41,84 @@ * configured in system or environment properties IGNITE_JVM_PAUSE_DETECTOR_PRECISION, * IGNITE_JVM_PAUSE_DETECTOR_THRESHOLD and IGNITE_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT accordingly. */ -class LongJVMPauseDetector { - /** Logger. */ - private static final IgniteLogger LOG = new JavaLogger(); - - /** Worker reference. 
*/ - private static final AtomicReference workerRef = new AtomicReference<>(); +public class LongJVMPauseDetector { + /** Ignite JVM pause detector threshold default value. */ + public static final int DEFAULT_JVM_PAUSE_DETECTOR_THRESHOLD = 500; /** Precision. */ private static final int PRECISION = getInteger(IGNITE_JVM_PAUSE_DETECTOR_PRECISION, 50); /** Threshold. */ - private static final int THRESHOLD = getInteger(IGNITE_JVM_PAUSE_DETECTOR_THRESHOLD, 500); + private static final int THRESHOLD = + getInteger(IGNITE_JVM_PAUSE_DETECTOR_THRESHOLD, DEFAULT_JVM_PAUSE_DETECTOR_THRESHOLD); /** Event count. */ private static final int EVT_CNT = getInteger(IGNITE_JVM_PAUSE_DETECTOR_LAST_EVENTS_COUNT, 20); + /** Disabled flag. */ + private static final boolean DISABLED = + getBoolean(IGNITE_JVM_PAUSE_DETECTOR_DISABLED, false); + + /** Logger. */ + private final IgniteLogger log; + + /** Worker reference. */ + private final AtomicReference workerRef = new AtomicReference<>(); + /** Long pause count. */ - private static long longPausesCnt; + private long longPausesCnt; /** Long pause total duration. */ - private static long longPausesTotalDuration; + private long longPausesTotalDuration; + + /** Last detector's wake up time. */ + private long lastWakeUpTime; /** Long pauses timestamps. */ - private static final long[] longPausesTimestamps = new long[EVT_CNT]; + private final long[] longPausesTimestamps = new long[EVT_CNT]; /** Long pauses durations. */ - private static final long[] longPausesDurations = new long[EVT_CNT]; + private final long[] longPausesDurations = new long[EVT_CNT]; + + /** + * @param log Logger. + */ + public LongJVMPauseDetector(IgniteLogger log) { + this.log = log; + } /** * Starts worker if not started yet. 
*/ - public static void start() { - if (getBoolean(IGNITE_JVM_PAUSE_DETECTOR_DISABLED, false)) { - if (LOG.isDebugEnabled()) - LOG.debug("JVM Pause Detector is disabled."); + public void start() { + if (DISABLED) { + if (log.isDebugEnabled()) + log.debug("JVM Pause Detector is disabled."); return; } final Thread worker = new Thread("jvm-pause-detector-worker") { - private long prev = System.currentTimeMillis(); @Override public void run() { - if (LOG.isDebugEnabled()) - LOG.debug(getName() + " has been started."); + synchronized (LongJVMPauseDetector.this) { + lastWakeUpTime = System.currentTimeMillis(); + } + + if (log.isDebugEnabled()) + log.debug(getName() + " has been started."); while (true) { try { Thread.sleep(PRECISION); final long now = System.currentTimeMillis(); - final long pause = now - PRECISION - prev; - - prev = now; + final long pause = now - PRECISION - lastWakeUpTime; if (pause >= THRESHOLD) { - LOG.warning("Possible too long JVM pause: " + pause + " milliseconds."); + log.warning("Possible too long JVM pause: " + pause + " milliseconds."); - synchronized (LongJVMPauseDetector.class) { + synchronized (LongJVMPauseDetector.this) { final int next = (int)(longPausesCnt % EVT_CNT); longPausesCnt++; @@ -107,11 +128,21 @@ public static void start() { longPausesTimestamps[next] = now; longPausesDurations[next] = pause; + + lastWakeUpTime = now; + } + } + else { + synchronized (LongJVMPauseDetector.this) { + lastWakeUpTime = now; } } } catch (InterruptedException e) { - LOG.error(getName() + " has been interrupted", e); + if (workerRef.compareAndSet(this, null)) + log.error(getName() + " has been interrupted.", e); + else if (log.isDebugEnabled()) + log.debug(getName() + " has been stopped."); break; } @@ -120,43 +151,61 @@ public static void start() { }; if (!workerRef.compareAndSet(null, worker)) { - LOG.warning(LongJVMPauseDetector.class.getSimpleName() + " already started!"); + log.warning(LongJVMPauseDetector.class.getSimpleName() + " already 
started!"); return; } worker.setDaemon(true); worker.start(); + + if (log.isDebugEnabled()) + log.debug("LongJVMPauseDetector was successfully started"); } /** * Stops the worker if one is created and running. */ - public static void stop() { + public void stop() { final Thread worker = workerRef.getAndSet(null); if (worker != null && worker.isAlive() && !worker.isInterrupted()) worker.interrupt(); } + /** + * @return {@code false} if {@link IgniteSystemProperties#IGNITE_JVM_PAUSE_DETECTOR_DISABLED} set to {@code true}, + * and {@code true} otherwise. + */ + public static boolean enabled() { + return !DISABLED; + } + /** * @return Long JVM pauses count. */ - synchronized static long longPausesCount() { + synchronized long longPausesCount() { return longPausesCnt; } /** * @return Long JVM pauses total duration. */ - synchronized static long longPausesTotalDuration() { + synchronized long longPausesTotalDuration() { return longPausesTotalDuration; } + /** + * @return Last checker's wake up time. + */ + public synchronized long getLastWakeUpTime() { + return lastWakeUpTime; + } + /** * @return Last long JVM pause events. */ - synchronized static Map longPauseEvents() { + synchronized Map longPauseEvents() { final Map evts = new TreeMap<>(); for (int i = 0; i < longPausesTimestamps.length && longPausesTimestamps[i] != 0; i++) @@ -164,4 +213,17 @@ synchronized static Map longPauseEvents() { return evts; } + + /** + * @return Pair ({@code last long pause event time}, {@code pause time duration}) or {@code null}, if long pause + * wasn't occurred. 
+ */ + public synchronized @Nullable IgniteBiTuple getLastLongPause() { + int lastPauseIdx = (int)((EVT_CNT + longPausesCnt - 1) % EVT_CNT); + + if (longPausesTimestamps[lastPauseIdx] == 0) + return null; + + return new IgniteBiTuple<>(longPausesTimestamps[lastPauseIdx], longPausesDurations[lastPauseIdx]); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java index 9bad1eacb772e..a4bbda0ed248e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerContextImpl.java @@ -49,6 +49,7 @@ import org.apache.ignite.internal.processors.marshaller.MarshallerMappingTransport; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; @@ -196,6 +197,11 @@ public void onMappingDataReceived(byte platformId, Map mars int typeId = e.getKey(); String clsName = e.getValue().className(); + MappedName mappedName = platformCache.get(typeId); + + if (mappedName != null && !F.isEmpty(clsName) && clsName.equals(mappedName.className())) + continue; + platformCache.put(typeId, new MappedName(clsName, true)); fileStore.mergeAndWriteMapping(platformId, typeId, clsName); @@ -252,9 +258,10 @@ private void processResource(URL url) throws IOException { /** {@inheritDoc} */ @Override public boolean registerClassName( - byte platformId, - int typeId, - String clsName + byte platformId, + int typeId, + String clsName, + boolean failIfUnregistered ) throws IgniteCheckedException { ConcurrentMap cache = getCacheFor(platformId); @@ -270,7 +277,13 @@ private void processResource(URL url) throws 
IOException { if (transport.stopping()) return false; - IgniteInternalFuture fut = transport.awaitMappingAcceptance(new MarshallerMappingItem(platformId, typeId, clsName), cache); + MarshallerMappingItem item = new MarshallerMappingItem(platformId, typeId, clsName); + + GridFutureAdapter fut = transport.awaitMappingAcceptance(item, cache); + + if (failIfUnregistered && !fut.isDone()) + throw new UnregisteredBinaryTypeException(typeId, fut); + MappingExchangeResult res = fut.get(); return convertXchRes(res); @@ -280,13 +293,25 @@ private void processResource(URL url) throws IOException { if (transport.stopping()) return false; - IgniteInternalFuture fut = transport.proposeMapping(new MarshallerMappingItem(platformId, typeId, clsName), cache); + MarshallerMappingItem item = new MarshallerMappingItem(platformId, typeId, clsName); + + GridFutureAdapter fut = transport.proposeMapping(item, cache); + + if (failIfUnregistered && !fut.isDone()) + throw new UnregisteredBinaryTypeException(typeId, fut); + MappingExchangeResult res = fut.get(); return convertXchRes(res); } } + /** {@inheritDoc} */ + @Override + public boolean registerClassName(byte platformId, int typeId, String clsName) throws IgniteCheckedException { + return registerClassName(platformId, typeId, clsName, false); + } + /** {@inheritDoc} */ @Override public boolean registerClassNameLocally(byte platformId, int typeId, String clsName) throws IgniteCheckedException diff --git a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java index 6fb1371f10ea3..66d19db325b43 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/MarshallerMappingFileStore.java @@ -45,6 +45,9 @@ * when a classname is requested but is not presented in local cache of {@link MarshallerContextImpl}. 
*/ final class MarshallerMappingFileStore { + /** File lock timeout in milliseconds. */ + private static final int FILE_LOCK_TIMEOUT_MS = 5000; + /** */ private static final GridStripedLock fileLock = new GridStripedLock(32); @@ -92,14 +95,12 @@ void writeMapping(byte platformId, int typeId, String typeName) { File file = new File(workDir, fileName); try (FileOutputStream out = new FileOutputStream(file)) { - FileLock fileLock = fileLock(out.getChannel(), false); - - assert fileLock != null : fileName; - try (Writer writer = new OutputStreamWriter(out, StandardCharsets.UTF_8)) { - writer.write(typeName); + try (FileLock ignored = fileLock(out.getChannel(), false)) { + writer.write(typeName); - writer.flush(); + writer.flush(); + } } } catch (IOException e) { @@ -120,11 +121,10 @@ void writeMapping(byte platformId, int typeId, String typeName) { } /** - * @param platformId Platform id. - * @param typeId Type id. + * @param fileName File name. */ - String readMapping(byte platformId, int typeId) throws IgniteCheckedException { - String fileName = getFileName(platformId, typeId); + private String readMapping(String fileName) throws IgniteCheckedException { + ThreadLocalRandom rnd = null; Lock lock = fileLock(fileName); @@ -133,17 +133,30 @@ String readMapping(byte platformId, int typeId) throws IgniteCheckedException { try { File file = new File(workDir, fileName); - try (FileInputStream in = new FileInputStream(file)) { - FileLock fileLock = fileLock(in.getChannel(), true); + long time = 0; + + while (true) { + try (FileInputStream in = new FileInputStream(file)) { + try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { + try (FileLock ignored = fileLock(in.getChannel(), true)) { + if (file.length() > 0) + return reader.readLine(); + + if (rnd == null) + rnd = ThreadLocalRandom.current(); - assert fileLock != null : fileName; + if (time == 0) + time = System.nanoTime(); + else if (U.millisSinceNanos(time) >= 
FILE_LOCK_TIMEOUT_MS) + return null; - try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { - return reader.readLine(); + U.sleep(rnd.nextLong(50)); + } + } + } + catch (IOException ignored) { + return null; } - } - catch (IOException ignored) { - return null; } } finally { @@ -151,6 +164,14 @@ String readMapping(byte platformId, int typeId) throws IgniteCheckedException { } } + /** + * @param platformId Platform id. + * @param typeId Type id. + */ + String readMapping(byte platformId, int typeId) throws IgniteCheckedException { + return readMapping(getFileName(platformId, typeId)); + } + /** * Restores all mappings available in file system to marshaller context. * This method should be used only on node startup. @@ -165,22 +186,16 @@ void restoreMappings(MarshallerContext marshCtx) throws IgniteCheckedException { int typeId = getTypeId(name); - try (FileInputStream in = new FileInputStream(file)) { - try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) { - String clsName = reader.readLine(); + String clsName = readMapping(name); - if (clsName == null) { - throw new IgniteCheckedException("Class name is null for [platformId=" + platformId + - ", typeId=" + typeId + "], marshaller mappings storage is broken. " + - "Clean up marshaller directory (/marshaller) and restart the node."); - } - - marshCtx.registerClassNameLocally(platformId, typeId, clsName); - } - } - catch (IOException e) { - throw new IgniteCheckedException("Reading marshaller mapping from file " + name + " failed.", e); + if (clsName == null) { + throw new IgniteCheckedException("Class name is null for [platformId=" + platformId + + ", typeId=" + typeId + "], marshaller mappings storage is broken. " + + "Clean up marshaller directory (/marshaller) and restart the node. 
File name: " + name + + ", FileSize: " + file.length()); } + + marshCtx.registerClassNameLocally(platformId, typeId, clsName); } } @@ -276,10 +291,10 @@ private static FileLock fileLock( while (true) { FileLock fileLock = ch.tryLock(0L, Long.MAX_VALUE, shared); - if (fileLock == null) - U.sleep(rnd.nextLong(50)); - else + if (fileLock != null) return fileLock; + + U.sleep(rnd.nextLong(50)); } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/SecurityCredentialsAttrFilterPredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/SecurityCredentialsAttrFilterPredicate.java new file mode 100644 index 0000000000000..2f774bdcf4d32 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/SecurityCredentialsAttrFilterPredicate.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgnitePredicate; + +/** + * Predicate to filter out security credentials attribute by its name. 
+ */ +public class SecurityCredentialsAttrFilterPredicate implements IgnitePredicate { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override public boolean apply(String s) { + return !IgniteNodeAttributes.ATTR_SECURITY_CREDENTIALS.equals(s); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(SecurityCredentialsAttrFilterPredicate.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/StripedExecutorMXBeanAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/StripedExecutorMXBeanAdapter.java index e6811b7aee762..c1890931de12f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/StripedExecutorMXBeanAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/StripedExecutorMXBeanAdapter.java @@ -32,7 +32,7 @@ public class StripedExecutorMXBeanAdapter implements StripedExecutorMXBean { /** * @param exec Executor service */ - StripedExecutorMXBeanAdapter(StripedExecutor exec) { + public StripedExecutorMXBeanAdapter(StripedExecutor exec) { assert exec != null; this.exec = exec; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java new file mode 100644 index 0000000000000..916f5c10d990c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionMetricsMxBeanImpl.java @@ -0,0 +1,124 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import java.util.Map; +import org.apache.ignite.mxbean.TransactionMetricsMxBean; +import org.apache.ignite.transactions.TransactionMetrics; + +/** + * Transactions MXBean implementation. + */ +public class TransactionMetricsMxBeanImpl implements TransactionMetricsMxBean { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final TransactionMetrics transactionMetrics; + + /** + * Create TransactionMetricsMxBeanImpl. + */ + public TransactionMetricsMxBeanImpl() { + this(null); + } + + /** + * @param transactionMetrics Transaction metrics. 
+ */ + public TransactionMetricsMxBeanImpl(TransactionMetrics transactionMetrics) { + this.transactionMetrics = transactionMetrics; + } + + /** {@inheritDoc} */ + @Override public long commitTime() { + return transactionMetrics.commitTime(); + } + + /** {@inheritDoc} */ + @Override public long rollbackTime() { + return transactionMetrics.rollbackTime(); + } + + /** {@inheritDoc} */ + @Override public int txCommits() { + return transactionMetrics.txCommits(); + } + + /** {@inheritDoc} */ + @Override public int txRollbacks() { + return transactionMetrics.txRollbacks(); + } + + /** {@inheritDoc} */ + @Override public Map getAllOwnerTransactions() { + return transactionMetrics.getAllOwnerTransactions(); + } + + /** {@inheritDoc} */ + @Override public Map getLongRunningOwnerTransactions(int duration) { + return transactionMetrics.getLongRunningOwnerTransactions(duration); + } + + /** {@inheritDoc} */ + @Override public long getTransactionsCommittedNumber() { + return transactionMetrics.getTransactionsCommittedNumber(); + } + + /** {@inheritDoc} */ + @Override public long getTransactionsRolledBackNumber() { + return transactionMetrics.getTransactionsRolledBackNumber(); + } + + /** {@inheritDoc} */ + @Override public long getTransactionsHoldingLockNumber() { + return transactionMetrics.getTransactionsHoldingLockNumber(); + } + + /** {@inheritDoc} */ + @Override public long getLockedKeysNumber() { + return transactionMetrics.getLockedKeysNumber(); + } + + /** {@inheritDoc} */ + @Override public long getOwnerTransactionsNumber() { + return transactionMetrics.getOwnerTransactionsNumber(); + } + + /** {@inheritDoc} */ + @Override public long getTotalNodeSystemTime() { + return transactionMetrics.getTotalNodeSystemTime(); + } + + /** {@inheritDoc} */ + @Override public long getTotalNodeUserTime() { + return transactionMetrics.getTotalNodeUserTime(); + } + + /** {@inheritDoc} */ + @Override public String getNodeSystemTimeHistogram() { + return 
transactionMetrics.getNodeSystemTimeHistogram(); + } + + /** {@inheritDoc} */ + @Override public String getNodeUserTimeHistogram() { + return transactionMetrics.getNodeUserTimeHistogram(); + } +} + + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java new file mode 100644 index 0000000000000..f4304396eb9b9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/TransactionsMXBeanImpl.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal; + +import java.io.PrintWriter; +import java.io.StringWriter; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCompute; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.VisorTaskArgument; +import org.apache.ignite.internal.visor.tx.VisorTxInfo; +import org.apache.ignite.internal.visor.tx.VisorTxOperation; +import org.apache.ignite.internal.visor.tx.VisorTxProjection; +import org.apache.ignite.internal.visor.tx.VisorTxSortOrder; +import org.apache.ignite.internal.visor.tx.VisorTxTask; +import org.apache.ignite.internal.visor.tx.VisorTxTaskArg; +import org.apache.ignite.internal.visor.tx.VisorTxTaskResult; +import org.apache.ignite.mxbean.TransactionsMXBean; + +/** + * TransactionsMXBean implementation. + */ +public class TransactionsMXBeanImpl implements TransactionsMXBean { + /** */ + private final GridKernalContextImpl ctx; + + /** + * @param ctx Context. + */ + public TransactionsMXBeanImpl(GridKernalContextImpl ctx) { + this.ctx = ctx; + } + + /** {@inheritDoc} */ + @Override public String getActiveTransactions(Long minDuration, Integer minSize, String prj, String consistentIds, + String xid, String lbRegex, Integer limit, String order, boolean detailed, boolean kill) { + try { + IgniteCompute compute = ctx.cluster().get().compute(); + + VisorTxProjection proj = null; + + if (prj != null) { + if ("clients".equals(prj)) + proj = VisorTxProjection.CLIENT; + else if ("servers".equals(prj)) + proj = VisorTxProjection.SERVER; + } + + List consIds = null; + + if (consistentIds != null) + consIds = Arrays.stream(consistentIds.split(",")).collect(Collectors.toList()); + + VisorTxSortOrder sortOrder = null; + + if (order != null) + sortOrder = VisorTxSortOrder.valueOf(order.toUpperCase()); + + VisorTxTaskArg arg = new VisorTxTaskArg(kill ? 
VisorTxOperation.KILL : VisorTxOperation.LIST, + limit, minDuration == null ? null : minDuration * 1000, minSize, null, proj, consIds, xid, lbRegex, sortOrder, null); + + Map res = compute.execute(new VisorTxTask(), + new VisorTaskArgument<>(ctx.cluster().get().localNode().id(), arg, false)); + + if (detailed) { + StringWriter sw = new StringWriter(); + + PrintWriter w = new PrintWriter(sw); + + for (Map.Entry entry : res.entrySet()) { + if (entry.getValue().getInfos().isEmpty()) + continue; + + ClusterNode key = entry.getKey(); + + w.println(key.toString()); + + for (VisorTxInfo info : entry.getValue().getInfos()) + w.println(info.toUserString()); + } + + w.flush(); + + return sw.toString(); + } + else { + int cnt = 0; + + for (VisorTxTaskResult result : res.values()) + cnt += result.getInfos().size(); + + return Integer.toString(cnt); + } + } + catch (Exception e) { + throw new RuntimeException(e.getMessage()); + } + } + + /** {@inheritDoc} */ + @Override public long getTxTimeoutOnPartitionMapExchange() { + return ctx.config().getTransactionConfiguration().getTxTimeoutOnPartitionMapExchange(); + } + + /** {@inheritDoc} */ + @Override public void setTxTimeoutOnPartitionMapExchange(long timeout) { + try { + ctx.grid().context().cache().setTxTimeoutOnPartitionMapExchange(timeout); + } + catch (Exception e) { + throw new RuntimeException(e.getMessage()); + } + } + + /** {@inheritDoc} */ + @Override public boolean getTxOwnerDumpRequestsAllowed() { + return ctx.cache().context().tm().txOwnerDumpRequestsAllowed(); + } + + /** {@inheritDoc} */ + @Override public void setTxOwnerDumpRequestsAllowed(boolean allowed) { + ctx.cache().setTxOwnerDumpRequestsAllowed(allowed); + } + + /** {@inheritDoc} */ + @Override public long getLongTransactionTimeDumpThreshold() { + return ctx.cache().context().tm().longTransactionTimeDumpThreshold(); + } + + /** {@inheritDoc} */ + @Override public void setLongTransactionTimeDumpThreshold(long threshold) { + 
ctx.cache().longTransactionTimeDumpThreshold(threshold); + } + + /** {@inheritDoc} */ + @Override public double getTransactionTimeDumpSamplesCoefficient() { + return ctx.cache().context().tm().transactionTimeDumpSamplesCoefficient(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesCoefficient(double coefficient) { + ctx.cache().transactionTimeDumpSamplesCoefficient(coefficient); + } + + /** {@inheritDoc} */ + @Override public int getTransactionTimeDumpSamplesPerSecondLimit() { + return ctx.cache().context().tm().transactionTimeDumpSamplesPerSecondLimit(); + } + + /** {@inheritDoc} */ + @Override public void setTransactionTimeDumpSamplesPerSecondLimit(int limit) { + ctx.cache().longTransactionTimeDumpSamplesPerSecondLimit(limit); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(TransactionsMXBeanImpl.class, this); + } +} + + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java new file mode 100644 index 0000000000000..de507be5405a3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredBinaryTypeException.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.binary.BinaryMetadata; +import org.apache.ignite.internal.util.future.GridFutureAdapter; + +/** + * Exception thrown during serialization if binary metadata isn't registered and it's registration isn't allowed. + * Used for both binary types and marshalling mappings. + * Confusing old class name is preserved for backwards compatibility. + */ +public class UnregisteredBinaryTypeException extends IgniteException { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private static final String MESSAGE = + "Attempted to update binary metadata inside a critical synchronization block (will be " + + "automatically retried). This exception must not be wrapped to any other exception class. " + + "If you encounter this exception outside of EntryProcessor, please report to Apache Ignite " + + "dev-list. Debug info [typeId=%d, binaryMetadata=%s, fut=%s]"; + + /** */ + private static String createMessage(int typeId, BinaryMetadata binaryMetadata, GridFutureAdapter fut) { + return String.format(MESSAGE, typeId, binaryMetadata, fut); + } + + /** */ + private final int typeId; + + /** */ + private final BinaryMetadata binaryMetadata; + + /** */ + private final GridFutureAdapter fut; + + /** + * @param typeId Type ID. + * @param binaryMetadata Binary metadata. + */ + public UnregisteredBinaryTypeException(int typeId, BinaryMetadata binaryMetadata) { + this(typeId, binaryMetadata, null); + } + + /** + * @param typeId Type ID. + * @param fut Future to wait in handler. + */ + public UnregisteredBinaryTypeException(int typeId, GridFutureAdapter fut) { + this(typeId, null, fut); + } + + /** + * @param typeId Type ID. + * @param binaryMetadata Binary metadata. + * @param fut Future to wait in handler. 
+ */ + private UnregisteredBinaryTypeException(int typeId, BinaryMetadata binaryMetadata, GridFutureAdapter fut) { + super(createMessage(typeId, binaryMetadata, fut)); + + this.typeId = typeId; + this.binaryMetadata = binaryMetadata; + this.fut = fut; + } + + /** + * @return Type ID. + */ + public int typeId() { + return typeId; + } + + /** + * @return Binary metadata. + */ + public BinaryMetadata binaryMetadata() { + return binaryMetadata; + } + + /** + * @return Future to wait in handler. + */ + public GridFutureAdapter future() { + return fut; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredClassException.java b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredClassException.java new file mode 100644 index 0000000000000..6da7daad5941d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/UnregisteredClassException.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal; + +import org.apache.ignite.IgniteException; +import org.jetbrains.annotations.Nullable; + +/** + * Exception thrown during serialization if class isn't registered and it's registration isn't allowed. 
+ */ +public class UnregisteredClassException extends IgniteException { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final Class cls; + + /** + * @param cls Class that isn't registered. + */ + public UnregisteredClassException(Class cls) { + this.cls = cls; + } + + /** + * @param msg Error message. + * @param cls Class that isn't registered. + */ + public UnregisteredClassException(String msg, Class cls) { + super(msg); + this.cls = cls; + } + + /** + * @param cause Exception cause. + * @param cls Class that isn't registered. + */ + public UnregisteredClassException(Throwable cause, Class cls) { + super(cause); + this.cls = cls; + } + + /** + * @param msg Error message. + * @param cause Exception cause. + * @param cls Class that isn't registered. + */ + public UnregisteredClassException(String msg, @Nullable Throwable cause, Class cls) { + super(msg, cause); + this.cls = cls; + } + + /** + * @return Class that isn't registered. + */ + public Class cls() { + return cls; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java index 535249c80ac18..a0559cbdc8ba1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryCachingMetadataHandler.java @@ -46,7 +46,7 @@ private BinaryCachingMetadataHandler() { } /** {@inheritDoc} */ - @Override public synchronized void addMeta(int typeId, BinaryType type) throws BinaryObjectException { + @Override public synchronized void addMeta(int typeId, BinaryType type, boolean failIfUnregistered) throws BinaryObjectException { synchronized (this) { BinaryType oldType = metas.put(typeId, type); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java 
b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java index 106d238660f2f..7f86391d52748 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryClassDescriptor.java @@ -40,6 +40,8 @@ import org.apache.ignite.binary.BinaryReflectiveSerializer; import org.apache.ignite.binary.BinarySerializer; import org.apache.ignite.binary.Binarylizable; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.marshaller.optimized.OptimizedMarshaller; import org.apache.ignite.internal.processors.cache.CacheObjectImpl; import org.apache.ignite.internal.processors.query.QueryUtils; @@ -773,7 +775,7 @@ void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectException { BinaryMetadata meta = new BinaryMetadata(typeId, typeName, collector.meta(), affKeyFieldName, Collections.singleton(newSchema), false, null); - ctx.updateMetadata(typeId, meta); + ctx.updateMetadata(typeId, meta, writer.failIfUnregistered()); schemaReg.addSchema(newSchema.schemaId(), newSchema); } @@ -794,7 +796,7 @@ void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectException { BinaryMetadata meta = new BinaryMetadata(typeId, typeName, stableFieldsMeta, affKeyFieldName, Collections.singleton(stableSchema), false, null); - ctx.updateMetadata(typeId, meta); + ctx.updateMetadata(typeId, meta, writer.failIfUnregistered()); schemaReg.addSchema(stableSchema.schemaId(), stableSchema); @@ -822,6 +824,9 @@ void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectException { assert false : "Invalid mode: " + mode; } } + catch (UnregisteredBinaryTypeException | UnregisteredClassException e) { + throw e; + } catch (Exception e) { String msg; diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java index 233769660c9f9..c263def8aae75 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryContext.java @@ -48,6 +48,9 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; +import org.apache.ignite.internal.processors.marshaller.MappingExchangeResult; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.binary.BinaryBasicIdMapper; import org.apache.ignite.binary.BinaryBasicNameMapper; @@ -610,17 +613,22 @@ else if (cpElement.isFile()) { /** * @param cls Class. + * @param failIfUnregistered Throw exception if class isn't registered. * @return Class descriptor. * @throws BinaryObjectException In case of error. */ - public BinaryClassDescriptor descriptorForClass(Class cls, boolean deserialize) + public BinaryClassDescriptor descriptorForClass(Class cls, boolean deserialize, boolean failIfUnregistered) throws BinaryObjectException { assert cls != null; BinaryClassDescriptor desc = descByCls.get(cls); - if (desc == null) + if (desc == null) { + if (failIfUnregistered) + throw new UnregisteredClassException(cls); + desc = registerClassDescriptor(cls, deserialize); + } else if (!desc.registered()) { if (!desc.userType()) { BinaryClassDescriptor desc0 = new BinaryClassDescriptor( @@ -647,13 +655,17 @@ else if (!desc.registered()) { schemas, desc0.isEnum(), cls.isEnum() ? 
enumMap(cls) : null); - metaHnd.addMeta(desc0.typeId(), meta.wrap(this)); + metaHnd.addMeta(desc0.typeId(), meta.wrap(this), false); return desc0; } } - else + else { + if (failIfUnregistered) + throw new UnregisteredClassException(cls); + desc = registerUserClassDescriptor(desc); + } } return desc; @@ -771,7 +783,7 @@ private BinaryClassDescriptor registerUserClassDescriptor(Class cls, boolean final int typeId = mapper.typeId(clsName); - registered = registerUserClassName(typeId, cls.getName()); + registered = registerUserClassName(typeId, cls.getName(), false); BinarySerializer serializer = serializerForClass(cls); @@ -791,7 +803,7 @@ private BinaryClassDescriptor registerUserClassDescriptor(Class cls, boolean if (!deserialize) metaHnd.addMeta(typeId, new BinaryMetadata(typeId, typeName, desc.fieldsMeta(), affFieldName, null, - desc.isEnum(), cls.isEnum() ? enumMap(cls) : null).wrap(this)); + desc.isEnum(), cls.isEnum() ? enumMap(cls) : null).wrap(this), false); descByCls.put(cls, desc); @@ -809,7 +821,7 @@ private BinaryClassDescriptor registerUserClassDescriptor(Class cls, boolean private BinaryClassDescriptor registerUserClassDescriptor(BinaryClassDescriptor desc) { boolean registered; - registered = registerUserClassName(desc.typeId(), desc.describedClass().getName()); + registered = registerUserClassName(desc.typeId(), desc.describedClass().getName(), false); if (registered) { BinarySerializer serializer = desc.initialSerializer(); @@ -1160,7 +1172,7 @@ public void registerUserType(String clsName, } metaHnd.addMeta(id, - new BinaryMetadata(id, typeName, fieldsMeta, affKeyFieldName, null, isEnum, enumMap).wrap(this)); + new BinaryMetadata(id, typeName, fieldsMeta, affKeyFieldName, null, isEnum, enumMap).wrap(this), false); } /** @@ -1176,20 +1188,22 @@ public void registerUserTypesSchema() { /** * Register "type ID to class name" mapping on all nodes to allow for mapping requests resolution form client. 
* Other {@link BinaryContext}'s "register" methods and method - * {@link BinaryContext#descriptorForClass(Class, boolean)} already call this functionality so use this method - * only when registering class names whose {@link Class} is unknown. + * {@link BinaryContext#descriptorForClass(Class, boolean, boolean)} already call this functionality + * so use this method only when registering class names whose {@link Class} is unknown. * * @param typeId Type ID. * @param clsName Class Name. + * @param failIfUnregistered If {@code true} then throw {@link UnregisteredBinaryTypeException} with + * {@link MappingExchangeResult} future instead of synchronously awaiting for its completion. * @return {@code True} if the mapping was registered successfully. */ - public boolean registerUserClassName(int typeId, String clsName) { + public boolean registerUserClassName(int typeId, String clsName, boolean failIfUnregistered) { IgniteCheckedException e = null; boolean res = false; try { - res = marshCtx.registerClassName(JAVA_ID, typeId, clsName); + res = marshCtx.registerClassName(JAVA_ID, typeId, clsName, failIfUnregistered); } catch (DuplicateTypeIdException dupEx) { // Ignore if trying to register mapped type name of the already registered class name and vise versa @@ -1315,10 +1329,11 @@ public BinaryIdentityResolver identity(int typeId) { /** * @param typeId Type ID. * @param meta Meta data. + * @param failIfUnregistered Fail if unregistered. * @throws BinaryObjectException In case of error. 
*/ - public void updateMetadata(int typeId, BinaryMetadata meta) throws BinaryObjectException { - metaHnd.addMeta(typeId, meta.wrap(this)); + public void updateMetadata(int typeId, BinaryMetadata meta, boolean failIfUnregistered) throws BinaryObjectException { + metaHnd.addMeta(typeId, meta.wrap(this), failIfUnregistered); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java index 12a0fc352b99d..275169561fd56 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryEnumObjectImpl.java @@ -437,6 +437,6 @@ public BinaryEnumObjectImpl(BinaryContext ctx, byte[] arr) { * binary enum. */ public boolean isTypeEquals(final Class cls) { - return ctx.descriptorForClass(cls, false).typeId() == typeId(); + return ctx.descriptorForClass(cls, false, false).typeId() == typeId(); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java index 32774035671f7..fa2a9c971d207 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldAccessor.java @@ -26,6 +26,8 @@ import java.util.Map; import java.util.UUID; import org.apache.ignite.binary.BinaryObjectException; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.S; @@ -154,6 +156,9 @@ public void write(Object obj, BinaryWriterExImpl writer) throws BinaryObjectExce try { write0(obj, writer); } + catch 
(UnregisteredClassException | UnregisteredBinaryTypeException ex) { + throw ex; + } catch (Exception ex) { if (S.INCLUDE_SENSITIVE && !F.isEmpty(name)) throw new BinaryObjectException("Failed to write field [name=" + name + ']', ex); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java index 883576c9faad3..de0b2d0d3d3cc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryFieldImpl.java @@ -21,6 +21,7 @@ import java.math.BigInteger; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.sql.Time; import java.sql.Timestamp; import java.util.Date; import java.util.UUID; @@ -211,6 +212,14 @@ public int fieldId() { break; } + case GridBinaryMarshaller.TIME: { + long time = buf.getLong(); + + val = new Time(time); + + break; + } + case GridBinaryMarshaller.UUID: { long most = buf.getLong(); long least = buf.getLong(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java index dfc726e81e8d3..bfb0e1018a91c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMarshaller.java @@ -79,7 +79,7 @@ private void setBinaryContext(BinaryContext ctx, IgniteConfiguration cfg) { /** {@inheritDoc} */ @Override protected byte[] marshal0(@Nullable Object obj) throws IgniteCheckedException { - return impl.marshal(obj); + return impl.marshal(obj, false); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java index 5df32e7216ac7..3652d98aa50a1 
100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryMetadataHandler.java @@ -20,22 +20,25 @@ import java.util.Collection; import org.apache.ignite.binary.BinaryObjectException; import org.apache.ignite.binary.BinaryType; +import org.apache.ignite.internal.processors.cache.binary.MetadataUpdateProposedMessage; /** - * Binary meta data handler. + * Binary metadata handler. */ public interface BinaryMetadataHandler { /** - * Adds meta data. + * Adds a new or updates an existing metadata to the latest version. + * See {@link MetadataUpdateProposedMessage} javadoc for detailed protocol description. * * @param typeId Type ID. * @param meta Metadata. + * @param failIfUnregistered Fail if unregistered. * @throws BinaryObjectException In case of error. */ - public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException; + public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException; /** - * Gets meta data for provided type ID. + * Gets metadata for provided type ID. * * @param typeId Type ID. * @return Metadata. @@ -44,7 +47,7 @@ public interface BinaryMetadataHandler { public BinaryType metadata(int typeId) throws BinaryObjectException; /** - * Gets unwrapped meta data for provided type ID. + * Gets unwrapped metadata for provided type ID. * * @param typeId Type ID. * @return Metadata. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java index bbd931110e30a..4ee24285c7ee5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryNoopMetadataHandler.java @@ -43,7 +43,7 @@ private BinaryNoopMetadataHandler() { } /** {@inheritDoc} */ - @Override public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException { + @Override public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException { // No-op. } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java index c5fe6da73eab9..05d2bf81879d7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryObjectExImpl.java @@ -32,6 +32,7 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.lang.IgniteUuid; +import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.Nullable; /** @@ -201,12 +202,17 @@ private String toString(BinaryReaderHandles ctx, IdentityHashMap existingSchemas = meta.schemas(); + + for (BinarySchema existingSchema : existingSchemas) { + if (schemaId == existingSchema.schemaId()) { + schema = existingSchema; break; } } - if (schema == null) - throw new BinaryObjectException("Cannot find schema for object with compact footer [" + - "typeId=" + typeId + ", schemaId=" + schemaId + ']'); + if (schema == null) { + List existingSchemaIds = new ArrayList<>(existingSchemas.size()); + + for (BinarySchema existingSchema : 
existingSchemas) + existingSchemaIds.add(existingSchema.schemaId()); + + throw new BinaryObjectException("Cannot find schema for object with compact footer" + + " [typeName=" + type.typeName() + + ", typeId=" + typeId + + ", missingSchemaId=" + schemaId + + ", existingSchemaIds=" + existingSchemaIds + ']' + ); + } } else schema = createSchema(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinarySchemaRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinarySchemaRegistry.java index 91f29b22cfd8a..f22fc4c052121 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinarySchemaRegistry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinarySchemaRegistry.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.binary; +import java.util.ArrayList; +import java.util.List; import org.jetbrains.annotations.Nullable; import java.util.HashMap; @@ -98,75 +100,95 @@ else if (schemaId == schemaId4) * @param schemaId Schema ID. * @param schema Schema. */ - public void addSchema(int schemaId, BinarySchema schema) { - synchronized (this) { - if (inline) { - // Check if this is already known schema. - if (schemaId == schemaId1 || schemaId == schemaId2 || schemaId == schemaId3 || schemaId == schemaId4) - return; + public synchronized void addSchema(int schemaId, BinarySchema schema) { + if (inline) { + // Check if this is already known schema. + if (schemaId == schemaId1 || schemaId == schemaId2 || schemaId == schemaId3 || schemaId == schemaId4) + return; - // Try positioning new schema in inline mode. - if (schemaId1 == EMPTY) { - schemaId1 = schemaId; + // Try positioning new schema in inline mode. + if (schemaId1 == EMPTY) { + schemaId1 = schemaId; - schema1 = schema; + schema1 = schema; - inline = true; // Forcing HB edge just in case. + inline = true; // Forcing HB edge just in case. 
- return; - } + return; + } - if (schemaId2 == EMPTY) { - schemaId2 = schemaId; + if (schemaId2 == EMPTY) { + schemaId2 = schemaId; - schema2 = schema; + schema2 = schema; - inline = true; // Forcing HB edge just in case. + inline = true; // Forcing HB edge just in case. - return; - } + return; + } - if (schemaId3 == EMPTY) { - schemaId3 = schemaId; + if (schemaId3 == EMPTY) { + schemaId3 = schemaId; - schema3 = schema; + schema3 = schema; - inline = true; // Forcing HB edge just in case. + inline = true; // Forcing HB edge just in case. - return; - } + return; + } - if (schemaId4 == EMPTY) { - schemaId4 = schemaId; + if (schemaId4 == EMPTY) { + schemaId4 = schemaId; - schema4 = schema; + schema4 = schema; - inline = true; // Forcing HB edge just in case. + inline = true; // Forcing HB edge just in case. - return; - } + return; + } - // No luck, switching to hash map mode. - HashMap newSchemas = new HashMap<>(); + // No luck, switching to hash map mode. + HashMap newSchemas = new HashMap<>(); - newSchemas.put(schemaId1, schema1); - newSchemas.put(schemaId2, schema2); - newSchemas.put(schemaId3, schema3); - newSchemas.put(schemaId4, schema4); + newSchemas.put(schemaId1, schema1); + newSchemas.put(schemaId2, schema2); + newSchemas.put(schemaId3, schema3); + newSchemas.put(schemaId4, schema4); - newSchemas.put(schemaId, schema); + newSchemas.put(schemaId, schema); - schemas = newSchemas; + schemas = newSchemas; - inline = false; - } - else { - HashMap newSchemas = new HashMap<>(schemas); + inline = false; + } + else { + HashMap newSchemas = new HashMap<>(schemas); - newSchemas.put(schemaId, schema); + newSchemas.put(schemaId, schema); - schemas = newSchemas; - } + schemas = newSchemas; } } + + /** + * @return List of known schemas. 
+ */ + public synchronized List schemas() { + List res = new ArrayList<>(); + + if (inline) { + if (schemaId1 != EMPTY) + res.add(schema1); + if (schemaId2 != EMPTY) + res.add(schema2); + if (schemaId3 != EMPTY) + res.add(schema3); + if (schemaId4 != EMPTY) + res.add(schema4); + } + else + res.addAll(schemas.values()); + + return res; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java index 1f167f5f08080..77dce5602ec4e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryUtils.java @@ -600,7 +600,7 @@ public static byte typeByClass(Class cls) { if (type != null) return type; - if (isEnum(cls)) + if (U.isEnum(cls)) return GridBinaryMarshaller.ENUM; if (cls.isArray()) @@ -958,10 +958,30 @@ else if (fieldOffsetSize == OFFSET_2) * @throws BinaryObjectException If merge failed due to metadata conflict. */ public static BinaryMetadata mergeMetadata(@Nullable BinaryMetadata oldMeta, BinaryMetadata newMeta) { + return mergeMetadata(oldMeta, newMeta, null); + } + + /** + * Merge old and new metas. + * + * @param oldMeta Old meta. + * @param newMeta New meta. + * @param changedSchemas Set for holding changed schemas. + * @return New meta if old meta was null, old meta if no changes detected, merged meta otherwise. + * @throws BinaryObjectException If merge failed due to metadata conflict. 
+ */ + public static BinaryMetadata mergeMetadata(@Nullable BinaryMetadata oldMeta, BinaryMetadata newMeta, + @Nullable Set changedSchemas) { assert newMeta != null; - if (oldMeta == null) + if (oldMeta == null) { + if (changedSchemas != null) { + for (BinarySchema schema : newMeta.schemas()) + changedSchemas.add(schema.schemaId()); + } + return newMeta; + } else { assert oldMeta.typeId() == newMeta.typeId(); @@ -1036,8 +1056,12 @@ public static BinaryMetadata mergeMetadata(@Nullable BinaryMetadata oldMeta, Bin Collection mergedSchemas = new HashSet<>(oldMeta.schemas()); for (BinarySchema newSchema : newMeta.schemas()) { - if (mergedSchemas.add(newSchema)) + if (mergedSchemas.add(newSchema)) { changed = true; + + if (changedSchemas != null) + changedSchemas.add(newSchema.schemaId()); + } } // Return either old meta if no changes detected, or new merged meta. @@ -1141,7 +1165,7 @@ else if (isSpecialCollection(cls)) return BinaryWriteMode.COL; else if (isSpecialMap(cls)) return BinaryWriteMode.MAP; - else if (isEnum(cls)) + else if (U.isEnum(cls)) return BinaryWriteMode.ENUM; else if (cls == BinaryEnumObjectImpl.class) return BinaryWriteMode.BINARY_ENUM; @@ -1174,21 +1198,6 @@ public static boolean isSpecialMap(Class cls) { return HashMap.class.equals(cls) || LinkedHashMap.class.equals(cls); } - /** - * Check if class represents a Enum. - * - * @param cls Class. - * @return {@code True} if this is a Enum class. - */ - public static boolean isEnum(Class cls) { - if (cls.isEnum()) - return true; - - Class sCls = cls.getSuperclass(); - - return sCls != null && sCls.isEnum(); - } - /** * @return Value. 
*/ @@ -1640,7 +1649,7 @@ public static Class doReadClass(BinaryInputStream in, BinaryContext ctx, ClassLo } // forces registering of class by type id, at least locally - ctx.descriptorForClass(cls, true); + ctx.descriptorForClass(cls, true, false); } return cls; @@ -1670,7 +1679,7 @@ public static Class resolveClass(BinaryContext ctx, int typeId, @Nullable String } // forces registering of class by type id, at least locally - ctx.descriptorForClass(cls, true); + ctx.descriptorForClass(cls, true, false); } return cls; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java index a7f645c1f4763..e6efb0c509a52 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/BinaryWriterExImpl.java @@ -82,6 +82,9 @@ public class BinaryWriterExImpl implements BinaryWriter, BinaryRawWriterEx, Obje /** */ private BinaryInternalMapper mapper; + /** */ + private boolean failIfUnregistered; + /** * @param ctx Context. */ @@ -112,6 +115,20 @@ public BinaryWriterExImpl(BinaryContext ctx, BinaryOutputStream out, BinaryWrite start = out.position(); } + /** + * @return Fail if unregistered flag value. + */ + public boolean failIfUnregistered() { + return failIfUnregistered; + } + + /** + * @param failIfUnregistered Fail if unregistered. + */ + public void failIfUnregistered(boolean failIfUnregistered) { + this.failIfUnregistered = failIfUnregistered; + } + /** * @param typeId Type ID. 
*/ @@ -161,7 +178,7 @@ private void marshal0(Object obj, boolean enableReplace) throws BinaryObjectExce Class cls = obj.getClass(); - BinaryClassDescriptor desc = ctx.descriptorForClass(cls, false); + BinaryClassDescriptor desc = ctx.descriptorForClass(cls, false, failIfUnregistered); if (desc == null) throw new BinaryObjectException("Object is not binary: [class=" + cls + ']'); @@ -493,6 +510,8 @@ public void doWriteObject(@Nullable Object obj) throws BinaryObjectException { else { BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx, out, schema, handles()); + writer.failIfUnregistered(failIfUnregistered); + writer.marshal(obj); } } @@ -724,7 +743,10 @@ void doWriteObjectArray(@Nullable Object[] val) throws BinaryObjectException { if (tryWriteAsHandle(val)) return; - BinaryClassDescriptor desc = ctx.descriptorForClass(val.getClass().getComponentType(), false); + BinaryClassDescriptor desc = ctx.descriptorForClass( + val.getClass().getComponentType(), + false, + failIfUnregistered); out.unsafeEnsure(1 + 4); out.unsafeWriteByte(GridBinaryMarshaller.OBJ_ARR); @@ -795,7 +817,7 @@ void doWriteEnum(@Nullable Enum val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass(val.getDeclaringClass(), false); + BinaryClassDescriptor desc = ctx.descriptorForClass(val.getDeclaringClass(), false, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -848,7 +870,10 @@ void doWriteEnumArray(@Nullable Object[] val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass(val.getClass().getComponentType(), false); + BinaryClassDescriptor desc = ctx.descriptorForClass( + val.getClass().getComponentType(), + false, + failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -877,7 +902,7 @@ void doWriteClass(@Nullable Class val) { if (val == null) out.writeByte(GridBinaryMarshaller.NULL); else { - BinaryClassDescriptor desc = ctx.descriptorForClass(val, false); + 
BinaryClassDescriptor desc = ctx.descriptorForClass(val, false, failIfUnregistered); out.unsafeEnsure(1 + 4); @@ -906,7 +931,7 @@ public void doWriteProxy(Proxy proxy, Class[] intfs) { out.unsafeWriteInt(intfs.length); for (Class intf : intfs) { - BinaryClassDescriptor desc = ctx.descriptorForClass(intf, false); + BinaryClassDescriptor desc = ctx.descriptorForClass(intf, false, failIfUnregistered); if (desc.registered()) out.writeInt(desc.typeId()); @@ -1476,6 +1501,8 @@ void writeBinaryObjectField(@Nullable BinaryObjectImpl po) throws BinaryObjectEx else { BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx, out, schema, null); + writer.failIfUnregistered(failIfUnregistered); + writer.marshal(obj); } } @@ -1899,6 +1926,8 @@ boolean tryWriteAsHandle(Object obj) { public BinaryWriterExImpl newWriter(int typeId) { BinaryWriterExImpl res = new BinaryWriterExImpl(ctx, out, schema, handles()); + res.failIfUnregistered(failIfUnregistered); + res.typeId(typeId); return res; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/GridBinaryMarshaller.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/GridBinaryMarshaller.java index d6c8abdcfb35a..743958932f21b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/GridBinaryMarshaller.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/GridBinaryMarshaller.java @@ -240,14 +240,17 @@ public GridBinaryMarshaller(BinaryContext ctx) { /** * @param obj Object to marshal. + * @param failIfUnregistered Throw exception if class isn't registered. * @return Byte array. * @throws org.apache.ignite.binary.BinaryObjectException In case of error. 
*/ - public byte[] marshal(@Nullable Object obj) throws BinaryObjectException { + public byte[] marshal(@Nullable Object obj, boolean failIfUnregistered) throws BinaryObjectException { if (obj == null) return new byte[] { NULL }; try (BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx)) { + writer.failIfUnregistered(failIfUnregistered); + writer.marshal(obj); return writer.array(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java index bc5eb9e030b35..3930c463528e2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderEnum.java @@ -63,7 +63,7 @@ public BinaryBuilderEnum(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - this.typeId = reader.binaryContext().descriptorForClass(cls, false).typeId(); + this.typeId = reader.binaryContext().descriptorForClass(cls, false, false).typeId(); } else { this.typeId = typeId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java index 018444c65123a..edc80b6feccef 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryBuilderSerializer.java @@ -27,6 +27,7 @@ import org.apache.ignite.internal.binary.BinaryUtils; import org.apache.ignite.internal.binary.BinaryWriterExImpl; import org.apache.ignite.internal.binary.GridBinaryMarshaller; +import org.apache.ignite.internal.util.IgniteUtils; /** * @@ -110,7 +111,7 @@ public void writeValue(BinaryWriterExImpl writer, Object val, boolean forceCol, 
return; } - if (BinaryUtils.isEnum(val.getClass())) { + if (IgniteUtils.isEnum(val.getClass())) { String clsName = ((Enum)val).getDeclaringClass().getName(); int typeId = writer.context().typeId(clsName); @@ -125,10 +126,10 @@ public void writeValue(BinaryWriterExImpl writer, Object val, boolean forceCol, BinaryMetadata meta = new BinaryMetadata(typeId, typeName, null, null, null, true, enumMap); - writer.context().updateMetadata(typeId, meta); + writer.context().updateMetadata(typeId, meta, writer.failIfUnregistered()); // Need register class for marshaller to be able to deserialize enum value. - writer.context().descriptorForClass(((Enum)val).getDeclaringClass(), false); + writer.context().descriptorForClass(((Enum)val).getDeclaringClass(), false, false); writer.writeByte(GridBinaryMarshaller.ENUM); writer.writeInt(typeId); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java index 787ff638b995e..c0e79ec760594 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryEnumArrayLazyValue.java @@ -56,7 +56,7 @@ protected BinaryEnumArrayLazyValue(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - compTypeId = reader.binaryContext().descriptorForClass(cls, true).typeId(); + compTypeId = reader.binaryContext().descriptorForClass(cls, true, false).typeId(); } else { compTypeId = typeId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java index 8962107c77ac8..d4882dc6fb462 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectArrayLazyValue.java @@ -55,7 +55,7 @@ protected BinaryObjectArrayLazyValue(BinaryBuilderReader reader) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsName, e); } - compTypeId = reader.binaryContext().descriptorForClass(cls, true).typeId(); + compTypeId = reader.binaryContext().descriptorForClass(cls, true, false).typeId(); } else { compTypeId = typeId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java index b9eb3e5e0267b..d3b0973ca3533 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/builder/BinaryObjectBuilderImpl.java @@ -36,6 +36,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.Nullable; import java.util.Collection; @@ -157,7 +158,7 @@ public BinaryObjectBuilderImpl(BinaryObjectImpl obj) { throw new BinaryInvalidTypeException("Failed to load the class: " + clsNameToWrite, e); } - this.typeId = ctx.descriptorForClass(cls, false).typeId(); + this.typeId = ctx.descriptorForClass(cls, false, false).typeId(); registeredType = false; @@ -174,6 +175,11 @@ public BinaryObjectBuilderImpl(BinaryObjectImpl obj) { /** {@inheritDoc} */ @Override public BinaryObject build() { try (BinaryWriterExImpl writer = new BinaryWriterExImpl(ctx)) { + Thread curThread = Thread.currentThread(); + + if (curThread instanceof IgniteThread) + 
writer.failIfUnregistered(((IgniteThread)curThread).isForbiddenToRequestBinaryMetadata()); + writer.typeId(typeId); BinaryBuilderSerializer serializationCtx = new BinaryBuilderSerializer(); @@ -357,10 +363,10 @@ else if (readCache == null) { if (affFieldName0 == null) affFieldName0 = ctx.affinityKeyFieldName(typeId); - ctx.registerUserClassName(typeId, typeName); + ctx.registerUserClassName(typeId, typeName, writer.failIfUnregistered()); ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, fieldsMeta, affFieldName0, - Collections.singleton(curSchema), false, null)); + Collections.singleton(curSchema), false, null), writer.failIfUnregistered()); schemaReg.addSchema(curSchema.schemaId(), curSchema); } @@ -422,7 +428,8 @@ else if (newVal.getClass().isArray() && newVal.getClass().getComponentType() == else if (!nullFieldVal) { String newFldTypeName = BinaryUtils.fieldTypeName(newFldTypeId); - if (!F.eq(newFldTypeName, oldFldTypeName)) { + if (!F.eq(newFldTypeName, oldFldTypeName) && + !oldFldTypeName.equals(BinaryUtils.fieldTypeName(GridBinaryMarshaller.OBJ))) { throw new BinaryObjectException( "Wrong value has been set [" + "typeName=" + (typeName == null ? meta.typeName() : typeName) + diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryAbstractOutputStream.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryAbstractOutputStream.java index 769031f667b29..023e71c111345 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryAbstractOutputStream.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryAbstractOutputStream.java @@ -29,6 +29,15 @@ public abstract class BinaryAbstractOutputStream extends BinaryAbstractStream /** Minimal capacity when it is reasonable to start doubling resize. */ private static final int MIN_CAP = 256; + /** + * The maximum size of array to allocate. + * Some VMs reserve some header words in an array. 
+ * Attempts to allocate larger arrays may result in + * OutOfMemoryError: Requested array size exceeds VM limit + * @see java.util.ArrayList#MAX_ARRAY_SIZE + */ + protected static final int MAX_ARRAY_SIZE = Integer.MAX_VALUE - 8; + /** {@inheritDoc} */ @Override public void writeByte(byte val) { ensureCapacity(pos + 1); @@ -285,11 +294,17 @@ protected static int capacity(int curCap, int reqCap) { if (reqCap < MIN_CAP) newCap = MIN_CAP; + else if (reqCap > MAX_ARRAY_SIZE) + throw new IllegalArgumentException("Required capacity exceeds allowed. Required:" + reqCap); else { - newCap = curCap << 1; + newCap = Math.max(curCap, MIN_CAP); - if (newCap < reqCap) - newCap = reqCap; + while (newCap < reqCap) { + newCap = newCap << 1; + + if (newCap < 0) + newCap = MAX_ARRAY_SIZE; + } } return newCap; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryMemoryAllocatorChunk.java b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryMemoryAllocatorChunk.java index 09f8c3fb57786..5bac13840f1ea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryMemoryAllocatorChunk.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/binary/streams/BinaryMemoryAllocatorChunk.java @@ -35,7 +35,7 @@ public class BinaryMemoryAllocatorChunk { private int maxMsgSize; /** Last time array size is checked. */ - private long lastCheck = U.currentTimeMillis(); + private long lastCheckNanos = System.nanoTime(); /** Whether the holder is acquired or not. 
*/ private boolean acquired; @@ -88,15 +88,15 @@ public void release(byte[] data, int maxMsgSize) { this.acquired = false; - long now = U.currentTimeMillis(); + long nowNanos = System.nanoTime(); - if (now - this.lastCheck >= CHECK_FREQ) { + if (U.nanosToMillis(nowNanos - lastCheckNanos) >= CHECK_FREQ) { int halfSize = data.length >> 1; if (this.maxMsgSize < halfSize) this.data = new byte[halfSize]; - this.lastCheck = now; + lastCheckNanos = nowNanos; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClient.java b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClient.java index 0cba5026ee046..0405a90cc005c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/GridClient.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/GridClient.java @@ -153,4 +153,11 @@ public interface GridClient extends AutoCloseable { * {@code try-with-resources} statement. */ @Override public void close(); -} \ No newline at end of file + + /** + * If client was not connected topology, throw last error encountered. + * + * @throws GridClientException If client was not connected + */ + public void throwLastError() throws GridClientException; +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientImpl.java index d045a62ff9371..21e09bf894450 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/GridClientImpl.java @@ -349,6 +349,11 @@ public GridClientTopology topology() { return top; } + /** {@inheritDoc} */ + @Override public void throwLastError() throws GridClientException { + top.nodes(); + } + /** * @return Connection manager. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnectionManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnectionManagerAdapter.java index 829b188a823e3..fe0453f0076e4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnectionManagerAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientConnectionManagerAdapter.java @@ -38,6 +38,7 @@ import javax.net.ssl.SSLContext; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.client.GridClientClosedException; import org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientException; @@ -656,6 +657,11 @@ private NioListener(Logger log) { } } + /** {@inheritDoc} */ + @Override public void onFailure(FailureType failureType, Throwable failure) { + // No-op. + } + /** * Handles client handshake response. 
* diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java index 6e55df8ba2621..ecc1cdf32154a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/impl/connection/GridClientNioTcpConnection.java @@ -212,11 +212,11 @@ public class GridClientNioTcpConnection extends GridClientConnection { sock.setTcpNoDelay(tcpNoDelay); sock.setKeepAlive(true); - final long startConnTime = U.currentTimeMillis(); + final long startConnTime = System.currentTimeMillis(); sock.connect(srvAddr, connectTimeout); - final long connTimeoutRest = connectTimeout - (U.currentTimeMillis() - startConnTime); + final long connTimeoutRest = connectTimeout - (System.currentTimeMillis() - startConnTime); GridClientFuture handshakeFut = new GridClientFutureAdapter<>(); @@ -433,7 +433,7 @@ private GridClientFutureAdapter makeRequest(GridClientMessage msg, final assert msg != null; if (msg instanceof GridClientPingPacket) { - long now = U.currentTimeMillis(); + long now = System.currentTimeMillis(); if (Math.min(now, lastPingRcvTime) - lastPingSndTime >= pingTimeout) close(FAILED, false, @@ -472,7 +472,7 @@ else if (now - lastPingSndTime > pingInterval && lastPingRcvTime != Long.MAX_VAL GridNioFuture sndFut = ses.send(msg); - lastMsgSndTime = U.currentTimeMillis(); + lastMsgSndTime = System.currentTimeMillis(); if (routeMode) { sndFut.listen(new CI1>() { @@ -506,7 +506,7 @@ else if (now - lastPingSndTime > pingInterval && lastPingRcvTime != Long.MAX_VAL * Handles ping response. 
*/ void handlePingResponse() { - lastPingRcvTime = U.currentTimeMillis(); + lastPingRcvTime = System.currentTimeMillis(); } /** @@ -517,7 +517,7 @@ void handlePingResponse() { */ @SuppressWarnings({"unchecked", "TooBroadScope"}) void handleResponse(GridClientMessage res) throws IOException { - lastMsgRcvTime = U.currentTimeMillis(); + lastMsgRcvTime = System.currentTimeMillis(); TcpClientFuture fut = pendingReqs.get(res.requestId()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/marshaller/jdk/GridClientJdkMarshaller.java b/modules/core/src/main/java/org/apache/ignite/internal/client/marshaller/jdk/GridClientJdkMarshaller.java index 87bc7aa0de9b0..bd89e80b5d383 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/marshaller/jdk/GridClientJdkMarshaller.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/marshaller/jdk/GridClientJdkMarshaller.java @@ -19,13 +19,16 @@ import java.io.ByteArrayInputStream; import java.io.IOException; +import java.io.InputStream; import java.io.ObjectInput; import java.io.ObjectInputStream; import java.io.ObjectOutput; import java.io.ObjectOutputStream; +import java.io.ObjectStreamClass; import java.nio.ByteBuffer; import org.apache.ignite.internal.client.marshaller.GridClientMarshaller; import org.apache.ignite.internal.util.io.GridByteArrayOutputStream; +import org.apache.ignite.lang.IgnitePredicate; /** * Simple marshaller that utilize JDK serialization features. @@ -34,6 +37,23 @@ public class GridClientJdkMarshaller implements GridClientMarshaller { /** ID. */ public static final byte ID = 2; + /** Class name filter. */ + private final IgnitePredicate clsFilter; + + /** + * Default constructor. + */ + public GridClientJdkMarshaller() { + this(null); + } + + /** + * @param clsFilter Class filter. 
+ */ + public GridClientJdkMarshaller(IgnitePredicate clsFilter) { + this.clsFilter = clsFilter; + } + /** {@inheritDoc} */ @Override public ByteBuffer marshal(Object obj, int off) throws IOException { GridByteArrayOutputStream bOut = new GridByteArrayOutputStream(); @@ -60,7 +80,7 @@ public class GridClientJdkMarshaller implements GridClientMarshaller { @Override public T unmarshal(byte[] bytes) throws IOException { ByteArrayInputStream tmp = new ByteArrayInputStream(bytes); - ObjectInput in = new ObjectInputStream(tmp); + ObjectInput in = new ClientJdkInputStream(tmp, clsFilter); try { return (T)in.readObject(); @@ -69,4 +89,33 @@ public class GridClientJdkMarshaller implements GridClientMarshaller { throw new IOException("Failed to unmarshal target object: " + e.getMessage(), e); } } + + /** + * Wrapper with class resolving control. + */ + private static class ClientJdkInputStream extends ObjectInputStream { + /** Class name filter. */ + private final IgnitePredicate clsFilter; + + + /** + * @param in Input stream. + * @param clsFilter Class filter. 
+ */ + public ClientJdkInputStream(InputStream in, IgnitePredicate clsFilter) throws IOException { + super(in); + + this.clsFilter = clsFilter; + } + + /** {@inheritDoc} */ + @Override protected Class resolveClass(ObjectStreamClass desc) throws IOException, ClassNotFoundException { + String clsName = desc.getName(); + + if (clsFilter != null && !clsFilter.apply(clsName)) + throw new RuntimeException("Deserialization of class " + clsName + " is disallowed."); + + return super.resolveClass(desc); + } + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridRouterClientImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridRouterClientImpl.java index 59833fcf975c6..f3c9d39d6526e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridRouterClientImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridRouterClientImpl.java @@ -219,4 +219,9 @@ public void stop(boolean wait) { @Override public void close() { clientImpl.close(); } -} \ No newline at end of file + + /** {@inheritDoc} */ + @Override public void throwLastError() throws GridClientException { + clientImpl.throwLastError(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java index 22f5152a2ea0c..4c61eade7c6e3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/router/impl/GridTcpRouterNioListenerAdapter.java @@ -23,7 +23,10 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import 
org.apache.ignite.IgniteLogger; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.client.GridClientException; import org.apache.ignite.internal.client.GridClientFuture; import org.apache.ignite.internal.client.GridClientFutureListener; @@ -41,6 +44,8 @@ import org.apache.ignite.internal.util.nio.GridNioServerListener; import org.apache.ignite.internal.util.nio.GridNioSession; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.marshaller.MarshallerUtils; import org.apache.ignite.plugin.PluginProvider; import org.jetbrains.annotations.Nullable; @@ -86,7 +91,15 @@ public GridTcpRouterNioListenerAdapter(IgniteLogger log, GridRouterClientImpl cl marshMap.put(GridClientOptimizedMarshaller.ID, optdMarsh); marshMap.put(GridClientZipOptimizedMarshaller.ID, new GridClientZipOptimizedMarshaller(optdMarsh, providers)); - marshMap.put(GridClientJdkMarshaller.ID, new GridClientJdkMarshaller()); + + try { + IgnitePredicate clsFilter = MarshallerUtils.classNameFilter(this.getClass().getClassLoader()); + + marshMap.put(GridClientJdkMarshaller.ID, new GridClientJdkMarshaller(clsFilter)); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } init(); } @@ -149,8 +162,7 @@ public GridTcpRouterNioListenerAdapter(IgniteLogger log, GridRouterClientImpl cl U.warn( log, - "Message forwarding was interrupted (will ignore last message): " + e.getMessage(), - "Message forwarding was interrupted."); + "Message forwarding was interrupted (will ignore last message): " + e.getMessage()); } } else if (msg instanceof GridClientHandshakeRequest) { @@ -190,6 +202,11 @@ else if (msg instanceof GridClientPingPacket) throw new IllegalArgumentException("Unsupported input message: " + msg); } + /** {@inheritDoc} */ + @Override public void onFailure(FailureType failureType, Throwable failure) { + // No-op. 
+ } + /** {@inheritDoc} */ @Override public void onSessionWriteTimeout(GridNioSession ses) { U.warn(log, "Closing NIO session because of write timeout."); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java index b651ae38041fe..c623175283630 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/ssl/GridSslBasicContextFactory.java @@ -23,17 +23,20 @@ import java.io.InputStream; import java.security.GeneralSecurityException; import java.security.KeyStore; -import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.Arrays; +import java.util.Collection; import javax.cache.configuration.Factory; import javax.net.ssl.KeyManagerFactory; import javax.net.ssl.SSLContext; import javax.net.ssl.SSLException; +import javax.net.ssl.SSLParameters; import javax.net.ssl.TrustManager; import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509TrustManager; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.A; +import org.apache.ignite.ssl.SSLContextWrapper; /** * Basic ssl context factory that provides ssl context configuration with specified key @@ -87,6 +90,12 @@ public class GridSslBasicContextFactory implements GridSslContextFactory { /** Trust managers. */ private TrustManager[] trustMgrs; + /** Enabled cipher suites. */ + private String[] cipherSuites; + + /** Enabled protocols. */ + private String[] protocols; + /** * Gets key store type used for context creation. * @@ -269,6 +278,63 @@ public void setTrustManagers(TrustManager... trustMgrs) { this.trustMgrs = trustMgrs; } + /** + * Gets enabled cipher suites. + * + * @return Enabled cipher suites. 
+ */ + public String[] getCipherSuites() { + return cipherSuites; + } + + /** + * Sets enabled cipher suites. + * + * @param cipherSuites Enabled cipher suites. + */ + public void setCipherSuites(String... cipherSuites) { + this.cipherSuites = cipherSuites; + } + + + /** + * Sets enabled cipher suites. + * + * @param cipherSuites Enabled cipher suites. + */ + public void setCipherSuites(Collection cipherSuites) { + if (!F.isEmpty(cipherSuites)) + setCipherSuites(cipherSuites.toArray(new String[0])); + } + + /** + * Gets enabled protocols. + * + * @return Enabled protocols. + */ + public String[] getProtocols() { + return protocols; + } + + /** + * Sets enabled protocols. + * + * @param protocols Enabled protocols. + */ + public void setProtocols(String... protocols) { + this.protocols = protocols; + } + + /** + * Sets enabled protocols. + * + * @param protocols Enabled protocols. + */ + public void setProtocols(Collection protocols) { + if (!F.isEmpty(protocols)) + setProtocols(protocols.toArray(new String[0])); + } + /** * Returns an instance of trust manager that will always succeed regardless of certificate provided. 
* @@ -303,6 +369,18 @@ public static TrustManager getDisabledTrustManager() { SSLContext ctx = SSLContext.getInstance(proto); + if (cipherSuites != null || protocols != null) { + SSLParameters sslParameters = new SSLParameters(); + + if (cipherSuites != null) + sslParameters.setCipherSuites(cipherSuites); + + if (protocols != null) + sslParameters.setProtocols(protocols); + + ctx = new SSLContextWrapper(ctx, sslParameters); + } + ctx.init(keyMgrFactory.getKeyManagers(), mgrs, null); return ctx; @@ -431,14 +509,12 @@ private static class DisabledX509TrustManager implements X509TrustManager { private static final X509Certificate[] CERTS = new X509Certificate[0]; /** {@inheritDoc} */ - @Override public void checkClientTrusted(X509Certificate[] x509Certificates, String s) - throws CertificateException { + @Override public void checkClientTrusted(X509Certificate[] x509Certificates, String s) { // No-op, all clients are trusted. } /** {@inheritDoc} */ - @Override public void checkServerTrusted(X509Certificate[] x509Certificates, String s) - throws CertificateException { + @Override public void checkServerTrusted(X509Certificate[] x509Certificates, String s) { // No-op, all servers are trusted. 
} @@ -447,4 +523,4 @@ private static class DisabledX509TrustManager implements X509TrustManager { return CERTS; } } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java index 8525f5edd37aa..4164532848bc6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinary.java @@ -160,7 +160,7 @@ class ClientBinary implements IgniteBinary { int typeId = ctx.typeId(typeName); - ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, null, null, null, true, vals)); + ctx.updateMetadata(typeId, new BinaryMetadata(typeId, typeName, null, null, null, true, vals), false); return ctx.metadata(typeId); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinaryMarshaller.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinaryMarshaller.java index c68b8f909e3fa..aac6873e95fad 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinaryMarshaller.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientBinaryMarshaller.java @@ -66,7 +66,7 @@ public T unmarshal(BinaryInputStream in) { * Serializes Java object into a byte array. 
*/ public byte[] marshal(Object obj) { - return impl.marshal(obj); + return impl.marshal(obj, false); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java index 71502a4760483..eb62c808225f9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientChannel.java @@ -22,6 +22,7 @@ import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; import org.apache.ignite.client.ClientConnectionException; +import org.apache.ignite.client.ClientAuthorizationException; /** * Processing thin client requests and responses. @@ -41,5 +42,5 @@ interface ClientChannel extends AutoCloseable { * @return Received operation payload or {@code null} if response has no payload. */ public T receive(ClientOperation op, long reqId, Function payloadReader) - throws ClientConnectionException; + throws ClientConnectionException, ClientAuthorizationException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientQueryCursor.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientQueryCursor.java index 9367cfd52a0af..086fab875bbf3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientQueryCursor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ClientQueryCursor.java @@ -54,6 +54,7 @@ class ClientQueryCursor implements QueryCursor { pager.close(); } catch (Exception ignored) { + // No-op. 
} } @@ -76,7 +77,10 @@ class ClientQueryCursor implements QueryCursor { currPageIt = currPage.iterator(); } catch (ClientException e) { - throw new RuntimeException("Failed to retrieve query results", e); + throw e; + } + catch (Exception e) { + throw new ClientException("Failed to retrieve query results", e); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java index 392b8f87ab63c..c9e4e5d58278a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/ReliableChannel.java @@ -38,6 +38,7 @@ import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; import org.apache.ignite.internal.util.HostAndPortRange; +import org.apache.ignite.internal.util.typedef.F; /** * Adds failover abd thread-safety to {@link ClientChannel}. @@ -81,9 +82,26 @@ final class ReliableChannel implements AutoCloseable { primary = addrs.get(new Random().nextInt(addrs.size())); // we already verified there is at least one address - for (InetSocketAddress a : addrs) + for (InetSocketAddress a : addrs) { if (a != primary) this.backups.add(a); + } + + ClientConnectionException lastEx = null; + + for (int i = 0; i < addrs.size(); i++) { + try { + ch = chFactory.apply(new ClientChannelConfiguration(clientCfg).setAddress(primary)).get(); + + return; + } catch (ClientConnectionException e) { + lastEx = e; + + changeServer(); + } + } + + throw lastEx; } /** {@inheritDoc} */ @@ -164,6 +182,9 @@ public void request(ClientOperation op, Consumer payloadWrit * @return host:port_range address lines parsed as {@link InetSocketAddress}. 
*/ private static List parseAddresses(String[] addrs) throws ClientException { + if (F.isEmpty(addrs)) + throw new ClientException("Empty addresses"); + Collection ranges = new ArrayList<>(addrs.length); for (String a : addrs) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java index 404793a83f338..10dc8652071f9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpClientChannel.java @@ -50,6 +50,7 @@ import javax.net.ssl.TrustManagerFactory; import javax.net.ssl.X509TrustManager; import org.apache.ignite.client.ClientAuthenticationException; +import org.apache.ignite.client.ClientAuthorizationException; import org.apache.ignite.client.ClientConnectionException; import org.apache.ignite.client.SslMode; import org.apache.ignite.client.SslProtocol; @@ -62,6 +63,7 @@ import org.apache.ignite.internal.binary.streams.BinaryInputStream; import org.apache.ignite.internal.binary.streams.BinaryOffheapOutputStream; import org.apache.ignite.internal.binary.streams.BinaryOutputStream; +import org.apache.ignite.internal.processors.platform.client.ClientStatus; /** * Implements {@link ClientChannel} over TCP. 
@@ -138,7 +140,8 @@ class TcpClientChannel implements ClientChannel { /** {@inheritDoc} */ public T receive(ClientOperation op, long reqId, Function payloadReader) - throws ClientConnectionException { + throws ClientConnectionException, ClientAuthorizationException { + final int MIN_RES_SIZE = 8 + 4; // minimal response size: long (8 bytes) ID + int (4 bytes) status int resSize = new BinaryHeapInputStream(read(4)).readInt(); @@ -163,7 +166,12 @@ public T receive(ClientOperation op, long reqId, Function 0) + errCode = r.readInt(); + + if (errCode == ClientStatus.AUTH_FAILED) + throw new ClientAuthenticationException(err); else if (ver.equals(srvVer)) throw new ClientProtocolError(err); else if (!supportedVers.contains(srvVer) || @@ -539,16 +552,10 @@ private static TrustManager[] getTrustManagers( /** */ private static KeyStore loadKeyStore(String lb, String path, String type, char[] pwd) { - InputStream in = null; + KeyStore store; try { - KeyStore store = KeyStore.getInstance(type); - - in = new FileInputStream(new File(path)); - - store.load(in, pwd); - - return store; + store = KeyStore.getInstance(type); } catch (KeyStoreException e) { throw new ClientError( @@ -556,6 +563,13 @@ private static KeyStore loadKeyStore(String lb, String path, String type, char[] e ); } + + try (InputStream in = new FileInputStream(new File(path))) { + + store.load(in, pwd); + + return store; + } catch (FileNotFoundException e) { throw new ClientError(String.format("%s key store file [%s] does not exist", lb, path), e); } @@ -571,16 +585,6 @@ private static KeyStore loadKeyStore(String lb, String path, String type, char[] catch (IOException e) { throw new ClientError(String.format("Could not read %s key store", lb), e); } - finally { - if (in != null) { - try { - in.close(); - } - catch (IOException ignored) { - // Fail silently - } - } - } } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java 
b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java index 7beeb799cbf79..e7f71cb2df222 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/client/thin/TcpIgniteClient.java @@ -233,7 +233,7 @@ private class ClientBinaryMetadataHandler implements BinaryMetadataHandler { private final BinaryMetadataHandler cache = BinaryCachingMetadataHandler.create(); /** {@inheritDoc} */ - @Override public void addMeta(int typeId, BinaryType meta) throws BinaryObjectException { + @Override public void addMeta(int typeId, BinaryType meta, boolean failIfUnregistered) throws BinaryObjectException { if (cache.metadata(typeId) == null) { try { ch.request( @@ -246,7 +246,7 @@ private class ClientBinaryMetadataHandler implements BinaryMetadataHandler { } } - cache.addMeta(typeId, meta); // merge + cache.addMeta(typeId, meta, failIfUnregistered); // merge } /** {@inheritDoc} */ @@ -259,7 +259,7 @@ private class ClientBinaryMetadataHandler implements BinaryMetadataHandler { if (meta0 != null) { meta = new BinaryTypeImpl(marsh.context(), meta0); - cache.addMeta(typeId, meta); + cache.addMeta(typeId, meta, false); } } @@ -314,8 +314,12 @@ private class ClientMarshallerContext implements MarshallerContext { private Map cache = new ConcurrentHashMap<>(); /** {@inheritDoc} */ - @Override public boolean registerClassName(byte platformId, int typeId, String clsName) - throws IgniteCheckedException { + @Override public boolean registerClassName( + byte platformId, + int typeId, + String clsName, + boolean failIfUnregistered + ) throws IgniteCheckedException { if (platformId != MarshallerPlatformIds.JAVA_ID) throw new IllegalArgumentException("platformId"); @@ -345,6 +349,13 @@ private class ClientMarshallerContext implements MarshallerContext { return res; } + /** {@inheritDoc} */ + @Override + @Deprecated + public boolean registerClassName(byte platformId, 
int typeId, String clsName) throws IgniteCheckedException { + return registerClassName(platformId, typeId, clsName, false); + } + /** {@inheritDoc} */ @Override public boolean registerClassNameLocally(byte platformId, int typeId, String clsName) { if (platformId != MarshallerPlatformIds.JAVA_ID) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java index 2c72bb02ed2f5..a3a69e19d9659 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/DetachedClusterNode.java @@ -30,7 +30,9 @@ import org.jetbrains.annotations.Nullable; /** - * Representation of cluster node that isn't currently present in cluster. + * Representation of cluster node that either isn't currently present in cluster, or semantically detached. + * For example nodes returned from {@code BaselineTopology.currentBaseline()} are always considered as + * semantically detached, even if they are currently present in cluster. 
*/ public class DetachedClusterNode implements ClusterNode { /** */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java index 43e97b5e4ac94..d79710db8759c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterAsyncImpl.java @@ -171,6 +171,16 @@ public IgniteClusterAsyncImpl(IgniteClusterImpl cluster) { cluster.enableStatistics(caches, enabled); } + /** {@inheritDoc} */ + @Override public void clearStatistics(Collection caches) { + cluster.clearStatistics(caches); + } + + /** {@inheritDoc} */ + @Override public void setTxTimeoutOnPartitionMapExchange(long timeout) { + cluster.setTxTimeoutOnPartitionMapExchange(timeout); + } + /** {@inheritDoc} */ @Override public Ignite ignite() { return cluster.ignite(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java index b69923b5bb783..9e76fba697c62 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/IgniteClusterImpl.java @@ -29,7 +29,10 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; +import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentLinkedQueue; @@ -409,6 +412,22 @@ private void validateBeforeBaselineChange(Collection bas if (baselineTop.isEmpty()) throw new IgniteException("BaselineTopology must contain at least one node."); + List currBlT = Optional.ofNullable(ctx.state().clusterState().baselineTopology()). 
+ map(BaselineTopology::currentBaseline).orElse(Collections.emptyList()); + + Collection srvrs = ctx.cluster().get().forServers().nodes(); + + for (BaselineNode node : baselineTop) { + Object consistentId = node.consistentId(); + + if (currBlT.stream().noneMatch( + currBlTNode -> Objects.equals(currBlTNode.consistentId(), consistentId)) && + srvrs.stream().noneMatch( + currServersNode -> Objects.equals(currServersNode.consistentId(), consistentId))) + throw new IgniteException("Check arguments. Node with consistent ID [" + consistentId + + "] not found in server nodes."); + } + Collection onlineNodes = onlineBaselineNodesRequestedForRemoval(baselineTop); if (onlineNodes != null) { @@ -470,7 +489,7 @@ private Collection getConsistentIds(Collection n Collection target = new ArrayList<>(top.size()); for (ClusterNode node : top) { - if (!node.isClient()) + if (!node.isClient() && !node.isDaemon()) target.add(node); } @@ -501,6 +520,36 @@ private Collection getConsistentIds(Collection n } } + /** {@inheritDoc} */ + @Override public void clearStatistics(Collection caches) { + guard(); + + try { + ctx.cache().clearStatistics(caches); + } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } + finally { + unguard(); + } + } + + /** {@inheritDoc} */ + @Override public void setTxTimeoutOnPartitionMapExchange(long timeout) { + guard(); + + try { + ctx.cache().setTxTimeoutOnPartitionMapExchange(timeout); + } + catch (IgniteCheckedException e) { + throw U.convertException(e); + } + finally { + unguard(); + } + } + /** {@inheritDoc} */ @Override public IgniteCluster withAsync() { return new IgniteClusterAsyncImpl(this); @@ -679,7 +728,7 @@ IgniteInternalFuture> startNodesAsync0( Collections.emptyList()); // Exceeding max line width for readability. 
- GridCompoundFuture> fut = + GridCompoundFuture> fut = new GridCompoundFuture<>(CU.objectsReducer()); AtomicInteger cnt = new AtomicInteger(nodeCallCnt); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/BitSetIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/BitSetIterator.java new file mode 100644 index 0000000000000..3a5cf9f21ea70 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/BitSetIterator.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.cluster.graph; + +import java.util.BitSet; +import java.util.Iterator; +import java.util.NoSuchElementException; + +/** + * Iterator over set bits in {@link BitSet}. + */ +public class BitSetIterator implements Iterator { + /** Bitset. */ + private final BitSet bitSet; + + /** Current index. */ + private int idx = -1; + + /** + * @param bitSet Bitset. + */ + public BitSetIterator(BitSet bitSet) { + this.bitSet = bitSet; + + advance(); + } + + /** + * Find index of the next set bit. 
+ */ + private void advance() { + idx = bitSet.nextSetBit(idx + 1); + } + + /** {@inheritDoc} */ + @Override public boolean hasNext() { + return idx != -1; + } + + /** {@inheritDoc} */ + @Override public Integer next() throws NoSuchElementException { + if (idx == -1) + throw new NoSuchElementException(); + + int res = idx; + + advance(); + + return res; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/ClusterGraph.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/ClusterGraph.java new file mode 100644 index 0000000000000..ba56c3386e34e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/ClusterGraph.java @@ -0,0 +1,207 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.cluster.graph; + +import java.util.ArrayList; +import java.util.BitSet; +import java.util.Iterator; +import java.util.List; +import java.util.function.Predicate; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CommunicationFailureContext; + +/** + * Class to represent cluster nodes avalaible connections as graph. 
+ * Provides several graph algorithms to analyze cluster nodes connections. + */ +public class ClusterGraph { + /** Number of all cluster nodes. */ + private final int nodeCnt; + + /** List of the all cluster nodes. */ + private final List nodes; + + /** Connectivity (adjacency) matrix between cluster nodes. */ + private final BitSet[] connections; + + /** Fully-connected component searcher. */ + private final FullyConnectedComponentSearcher fccSearcher; + + /** + * Constructor. + * + * @param ctx Communication failure context. + * @param nodeFilterOut Filter to exclude some cluster nodes from graph. + */ + public ClusterGraph(CommunicationFailureContext ctx, Predicate nodeFilterOut) { + nodes = ctx.topologySnapshot(); + + nodeCnt = nodes.size(); + + assert nodeCnt > 0; + + connections = buildConnectivityMatrix(ctx, nodeFilterOut); + + fccSearcher = new FullyConnectedComponentSearcher(connections); + } + + /** + * Builds connectivity matrix (adjacency matrix) for all cluster nodes. + * + * @param ctx Communication failure context. + * @param nodeFilterOut Filter to exclude some cluster nodes from graph. + * @return Connections bit set for each node, where set bit means avalable connection. + */ + private BitSet[] buildConnectivityMatrix(CommunicationFailureContext ctx, Predicate nodeFilterOut) { + BitSet[] connections = new BitSet[nodeCnt]; + + for (int i = 0; i < nodeCnt; i++) { + ClusterNode node = nodes.get(i); + + if (nodeFilterOut.test(node)) { + connections[i] = null; + continue; + } + + connections[i] = new BitSet(nodeCnt); + for (int j = 0; j < nodeCnt; j++) { + ClusterNode to = nodes.get(j); + + if (nodeFilterOut.test(to)) + continue; + + if (i == j || ctx.connectionAvailable(node, to)) + connections[i].set(j); + } + } + + // Remove unidirectional connections (node A can connect to B, but B can't connect to A). 
+ for (int i = 0; i < nodeCnt; i++) + for (int j = i + 1; j < nodeCnt; j++) { + if (connections[i] == null || connections[j] == null) + continue; + + if (connections[i].get(j) ^ connections[j].get(i)) { + connections[i].set(j, false); + connections[j].set(i, false); + } + } + + return connections; + } + + /** + * Finds connected components in cluster graph. + * + * @return List of set of nodes, each set represents connected component. + */ + public List findConnectedComponents() { + List connectedComponets = new ArrayList<>(); + + BitSet visitSet = new BitSet(nodeCnt); + + for (int i = 0; i < nodeCnt; i++) { + if (visitSet.get(i) || connections[i] == null) + continue; + + BitSet currComponent = new BitSet(nodeCnt); + + dfs(i, currComponent, visitSet); + + connectedComponets.add(currComponent); + } + + return connectedComponets; + } + + /** + * Deep-first search to find connected components in connections graph. + * + * @param nodeIdx Current node index to traverse from. + * @param currComponent Current connected component to populate. + * @param allVisitSet Set of the visited nodes in whole graph during traversal. + */ + private void dfs(int nodeIdx, BitSet currComponent, BitSet allVisitSet) { + assert !allVisitSet.get(nodeIdx) + : "Incorrect node visit " + nodeIdx; + + assert connections[nodeIdx] != null + : "Incorrect node visit. Node has not passed filter " + nodes.get(nodeIdx); + + allVisitSet.set(nodeIdx); + + currComponent.set(nodeIdx); + + for (int toIdx = 0; toIdx < nodeCnt; toIdx++) { + if (toIdx == nodeIdx || allVisitSet.get(toIdx) || connections[toIdx] == null) + continue; + + boolean connected = connections[nodeIdx].get(toIdx) && connections[toIdx].get(nodeIdx); + + if (connected) + dfs(toIdx, currComponent, allVisitSet); + } + } + + /** + * Finds largest fully-connected component from given {@code nodesSet}. + * + * @param nodesSet Set of nodes. + * @return Set of nodes which forms largest fully-connected component. 
+ */ + public BitSet findLargestFullyConnectedComponent(BitSet nodesSet) { + // Check that current set is already fully connected. + boolean fullyConnected = checkFullyConnected(nodesSet); + + if (fullyConnected) + return nodesSet; + + BitSet res = fccSearcher.findLargest(nodesSet); + + assert checkFullyConnected(res) + : "Not fully connected component was found [result=" + res + ", nodesSet=" + nodesSet + "]"; + + return res; + } + + /** + * Checks that given {@code nodesSet} forms fully-connected component. + * + * @param nodesSet Set of cluster nodes. + * @return {@code True} if all given cluster nodes are able to connect to each other. + */ + public boolean checkFullyConnected(BitSet nodesSet) { + int maxIdx = nodesSet.length(); + + Iterator it = new BitSetIterator(nodesSet); + + while (it.hasNext()) { + int idx = it.next(); + + for (int i = 0; i < maxIdx; i++) { + if (i == idx) + continue; + + if (nodesSet.get(i) && !connections[idx].get(i)) + return false; + } + } + + return true; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/FullyConnectedComponentSearcher.java b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/FullyConnectedComponentSearcher.java new file mode 100644 index 0000000000000..9a8098eb3de70 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/cluster/graph/FullyConnectedComponentSearcher.java @@ -0,0 +1,341 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.cluster.graph; + +import java.util.Arrays; +import java.util.BitSet; +import java.util.Comparator; +import java.util.Iterator; + +/** + * Class to find (possibly) largest fully-connected component (also can be called as complete subgraph) in graph. + * This problem is also known as Clique problem which is NP-complete. + * + * For small number of nodes simple brute-force algorithm is used which finds such component guaranteed. + * For large number of nodes some sort of greedy heuristic is used which works well for real-life scenarios + * but doesn't guarantee to find largest component, however very close to ideal result. + */ +public class FullyConnectedComponentSearcher { + /** The maximal number of nodes when bruteforce algorithm will be used. */ + private static final int BRUTE_FORCE_THRESHOULD = 24; + + /** Number of nodes in connections graph. */ + private final int totalNodesCnt; + + /** Adjacency matrix. */ + private final BitSet[] connections; + + /** + * Constructor. + * + * @param connections Adjacency matrix. + */ + public FullyConnectedComponentSearcher(BitSet[] connections) { + this.connections = connections; + totalNodesCnt = connections.length; + } + + /** + * Find largest fully connected component from presented set of the nodes {@code where}. + * + * @param where Set of nodes where fully connected component must be found. + * @return Set of nodes forming fully connected component. 
+ */ + public BitSet findLargest(BitSet where) { + int nodesCnt = where.cardinality(); + + if (nodesCnt <= BRUTE_FORCE_THRESHOULD) + return bruteforce(nodesCnt, where); + + // Return best of the 2 heuristics. + BitSet e1 = heuristic1(where); + BitSet e2 = heuristic2(where); + + return e1.cardinality() > e2.cardinality() ? e1 : e2; + } + + /** + * Extract node indexes (set bits) from given {@code selectedSet} to integer array. + * + * @param selectedNodesCnt Number of nodes. + * @param selectedSet Set of nodes. + * @return Arrays which contains node indexes. + */ + private Integer[] extractNodeIndexes(int selectedNodesCnt, BitSet selectedSet) { + Integer[] indexes = new Integer[selectedNodesCnt]; + Iterator it = new BitSetIterator(selectedSet); + int i = 0; + + while (it.hasNext()) + indexes[i++] = it.next(); + + assert i == indexes.length + : "Extracted not all indexes [nodesCnt=" + selectedNodesCnt + ", extracted=" + i + ", set=" + selectedSet + "]"; + + return indexes; + } + + /** + * Sorts nodes using {@link ConnectionsComparator} + * and runs greedy algorithm {@link #greedyIterative(int, Integer[])} on it. + * + * @param selectedSet Set of nodes used to form fully-connected component. + * @return Subset of given {@code selectedSet} which forms fully connected component. + */ + private BitSet heuristic1(BitSet selectedSet) { + int selectedNodesCnt = selectedSet.cardinality(); + Integer[] nodeIndexes = extractNodeIndexes(selectedNodesCnt, selectedSet); + + Arrays.sort(nodeIndexes, new ConnectionsComparator(totalNodesCnt)); + + return greedyIterative(selectedNodesCnt, nodeIndexes); + } + + /** + * Exactly the same thing as in {@link #heuristic1(BitSet)} but using reversed {@link ConnectionsComparator}. + * + * @param selectedSet Set of nodes used to form fully-connected component. + * @return Subset of given {@code selectedSet} which forms fully connected component. 
+ */ + private BitSet heuristic2(BitSet selectedSet) { + int selectedNodesCnt = selectedSet.cardinality(); + Integer[] nodeIndexes = extractNodeIndexes(selectedNodesCnt, selectedSet); + + Arrays.sort(nodeIndexes, new ConnectionsComparator(totalNodesCnt).reversed()); + + return greedyIterative(selectedNodesCnt, nodeIndexes); + } + + /** + * Finds fully-connected component between given {@code nodeIndexes} and tries to maximize size of it. + * + * The main idea of the algorithm is that after specific sorting, + * nodes able to form fully-connected will be placed closer to each other in given {@code nodeIndexes} array. + * While nodes not able to form will be placed further. + * + * At the beginning of the algorithm we form a global set of nodes that can be used to form fully-connected component. + * We iterate over this set and try to add each node to current fully-connected component, which is empty at the beginning. + * + * When we add node to the component we need to check that after adding new component is also fully-connected. + * See {@link #joinNode(BitSet, int, Integer[])}. + * + * After end of iteration we exclude nodes which formed fully-connected from the global set and run iteration again and again + * on remaining nodes, while the global set will not be empty. + * + * Complexity is O(N^2), where N is number of nodes. + * + * @param selectedNodesCnt Number of nodes. + * @param nodeIndexes Node indexes used to form fully-connected component. + * @return Subset of given {@code nodeIndexes} which forms fully connected component. + */ + private BitSet greedyIterative(int selectedNodesCnt, Integer[] nodeIndexes) { + // Set of the nodes which can be used to form fully connected component. + BitSet canUse = new BitSet(selectedNodesCnt); + for (int i = 0; i < selectedNodesCnt; i++) + canUse.set(i); + + BitSet bestRes = null; + + while (!canUse.isEmpty()) { + // Even if we pick all possible nodes, their size will not be greater than current best result. 
+ // No needs to run next iteration in this case. + if (bestRes != null && canUse.cardinality() <= bestRes.cardinality()) + break; + + BitSet currRes = new BitSet(selectedNodesCnt); + + Iterator canUseIter = new BitSetIterator(canUse); + while (canUseIter.hasNext()) { + /* Try to add node to the current set that forms fully connected component. + Node will be skipped if after adding, current set loose fully connectivity. */ + int pickedIdx = canUseIter.next(); + + if (joinNode(currRes, pickedIdx, nodeIndexes)) { + currRes.set(pickedIdx); + canUse.set(pickedIdx, false); + } + } + + if (bestRes == null || currRes.cardinality() > bestRes.cardinality()) + bestRes = currRes; + } + + // Try to improve our best result, if it was formed on second or next iteration. + for (int nodeIdx = 0; nodeIdx < selectedNodesCnt; nodeIdx++) + if (!bestRes.get(nodeIdx) && joinNode(bestRes, nodeIdx, nodeIndexes)) + bestRes.set(nodeIdx); + + // Replace relative node indexes (used in indexes) to absolute node indexes (used in whole graph connections). + BitSet reindexedBestRes = new BitSet(totalNodesCnt); + Iterator it = new BitSetIterator(bestRes); + while (it.hasNext()) + reindexedBestRes.set(nodeIndexes[it.next()]); + + return reindexedBestRes; + } + + /** + * Checks that given {@code nodeIdx} can be joined to current fully-connected component, + * so after join result component will be also fully-connected. + * + * @param currComponent Current fully-connected component. + * @param nodeIdx Node relative index. + * @param nodeIndexes Node absolute indexes. + * @return {@code True} if given node can be joined to {@code currentComponent}. + */ + private boolean joinNode(BitSet currComponent, int nodeIdx, Integer[] nodeIndexes) { + boolean fullyConnected = true; + + Iterator alreadyUsedIter = new BitSetIterator(currComponent); + while (alreadyUsedIter.hasNext()) { + int existedIdx = alreadyUsedIter.next(); + + // If no connection between existing node and picked node, skip picked node. 
+ if (!connections[nodeIndexes[nodeIdx]].get(nodeIndexes[existedIdx])) { + fullyConnected = false; + + break; + } + } + + return fullyConnected; + } + + /** + * Simple bruteforce implementation which works in O(2^N * N^2), where N is number of nodes. + * + * @param selectedNodesCnt Nodes count. + * @param selectedSet Set of nodes. + * @return Subset of given {@code set} of nodes which forms fully connected component. + */ + private BitSet bruteforce(int selectedNodesCnt, BitSet selectedSet) { + Integer[] indexes = extractNodeIndexes(selectedNodesCnt, selectedSet); + + int resMask = -1; + int maxCardinality = -1; + + // Iterate over all possible combinations of used nodes. + for (int mask = (1 << selectedNodesCnt) - 1; mask > 0; mask--) { + int cardinality = Integer.bitCount(mask); + + if (cardinality <= maxCardinality) + continue; + + // Check that selected set of nodes forms fully connected component. + boolean fullyConnected = true; + + for (int i = 0; i < selectedNodesCnt && fullyConnected; i++) + if ((mask & (1 << i)) != 0) + for (int j = 0; j < selectedNodesCnt; j++) + if ((mask & (1 << j)) != 0) { + boolean connected = connections[indexes[i]].get(indexes[j]); + + if (!connected) { + fullyConnected = false; + + break; + } + } + + if (fullyConnected) { + resMask = mask; + maxCardinality = cardinality; + } + } + + BitSet resSet = new BitSet(selectedNodesCnt); + + for (int i = 0; i < selectedNodesCnt; i++) { + if ((resMask & (1 << i)) != 0) { + int idx = indexes[i]; + + assert selectedSet.get(idx) + : "Result contains node which is not presented in income set [nodeIdx" + idx + ", set=" + selectedSet + "]"; + + resSet.set(idx); + } + } + + assert resSet.cardinality() > 0 + : "No nodes selected as fully connected component [set=" + selectedSet + "]"; + + return resSet; + } + + /** + * Comparator which sorts nodes by their connections array. 
+ * + * Suppose you have connections matrix: + * + * 1111 + * 1101 + * 1010 + * 1101 + * + * Each connection row associated with some node. + * Comparator will sort node indexes using their connection rows as very big binary numbers, as in example: + * + * 1111 + * 1101 + * 1101 + * 1011 + * + * Note: Comparator sorts only node indexes, actual connection rows swapping will be not happened. + */ + private class ConnectionsComparator implements Comparator { + /** Cache each connection long array representation, to avoid creating new object for each comparison. */ + private final long[][] cachedConnRows; + + /** + * Constructor + * @param totalNodesCnt Total number of nodes in the cluster. + */ + ConnectionsComparator(int totalNodesCnt) { + cachedConnRows = new long[totalNodesCnt][]; + } + + /** + * Returns long array representation of connection row for given node {@code idx}. + * + * @param idx Node index. + * @return Long array connection row representation. + */ + private long[] connectionRow(int idx) { + if (cachedConnRows[idx] != null) + return cachedConnRows[idx]; + + return cachedConnRows[idx] = connections[idx].toLongArray(); + } + + /** {@inheritDoc} */ + @Override public int compare(Integer node1, Integer node2) { + long[] conn1 = connectionRow(node1); + long[] conn2 = connectionRow(node2); + + int len = Math.min(conn1.length, conn2.length); + for (int i = 0; i < len; i++) { + int res = Long.compare(conn1[i], conn2[i]); + + if (res != 0) + return res; + } + + return conn1.length - conn2.length; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java new file mode 100644 index 0000000000000..ca634f8567e4c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ActivateCommand.java @@ -0,0 +1,73 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * 
contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientClusterState; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientException; + +import static org.apache.ignite.internal.commandline.CommandList.ACTIVATE; + +/** + * Activate cluster command. + */ +public class ActivateCommand implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + Command.usage(logger, "Activate cluster:", ACTIVATE); + } + + /** + * Activate cluster. + * + * @param cfg Client configuration. + * @throws GridClientException If failed to activate. 
+ */ + @Override public Object execute(GridClientConfiguration cfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(cfg)) { + GridClientClusterState state = client.state(); + + state.active(true); + + logger.info("Cluster activated"); + } + catch (Throwable e) { + logger.severe("Failed to activate cluster."); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public String name() { + return ACTIVATE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Arguments.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/Arguments.java deleted file mode 100644 index 23c8eecdca664..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Arguments.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.commandline; - -/** - * Bean with all parsed and validated arguments. - */ -public class Arguments { - /** Command. */ - private Command cmd; - - /** Host. */ - private String host; - - /** Port. 
*/ - private String port; - - /** User. */ - private String user; - - /** Password. */ - private String pwd; - - /** Force option is used for auto confirmation. */ - private boolean force; - - /** - * Action for baseline command. - */ - private String baselineAct; - - /** - * Arguments for baseline command. - */ - private String baselineArgs; - - /** - * @param cmd Command. - * @param host Host. - * @param port Port. - * @param user User. - * @param pwd Password. - * @param baselineAct Baseline action. - * @param baselineArgs Baseline args. - * @param force Force flag. - */ - public Arguments(Command cmd, String host, String port, String user, String pwd, - String baselineAct, String baselineArgs, - boolean force - ) { - this.cmd = cmd; - this.host = host; - this.port = port; - this.user = user; - this.pwd = pwd; - this.baselineAct = baselineAct; - this.baselineArgs = baselineArgs; - this.force = force; - } - - /** - * @return command - */ - public Command command() { - return cmd; - } - - /** - * @return host name - */ - public String host() { - return host; - } - - /** - * @return port number - */ - public String port() { - return port; - } - - /** - * @return user name - */ - public String user() { - return user; - } - - /** - * @return password - */ - public String password() { - return pwd; - } - - /** - * @return baseline action - */ - public String baselineAction() { - return baselineAct; - } - - /** - * @return baseline arguments - */ - public String baselineArguments() { - return baselineArgs; - } - - /** - * @return Force option. 
- */ - public boolean force() { - return force; - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java new file mode 100644 index 0000000000000..4eb3cd359c685 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/BaselineCommand.java @@ -0,0 +1,225 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.Comparator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.baseline.BaselineArguments; +import org.apache.ignite.internal.commandline.baseline.BaselineSubcommands; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.baseline.VisorBaselineNode; +import org.apache.ignite.internal.visor.baseline.VisorBaselineTask; +import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg; +import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult; + +import static org.apache.ignite.internal.commandline.CommandHandler.DELIM; +import static org.apache.ignite.internal.commandline.CommandList.BASELINE; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTask; +import static org.apache.ignite.internal.commandline.baseline.BaselineSubcommands.of; + +/** + * Commands assosiated with baseline functionality. + */ +public class BaselineCommand implements Command { + /** Arguments. 
*/ + private BaselineArguments baselineArgs; + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + final String constistIds = "consistentId1[,consistentId2,....,consistentIdN]"; + + Command.usage(logger, "Print cluster baseline topology:", BASELINE); + Command.usage(logger, "Add nodes into baseline topology:", BASELINE, BaselineSubcommands.ADD.text(), + constistIds, optional(CMD_AUTO_CONFIRMATION)); + Command.usage(logger, "Remove nodes from baseline topology:", BASELINE, BaselineSubcommands.REMOVE.text(), + constistIds, optional(CMD_AUTO_CONFIRMATION)); + Command.usage(logger, "Set baseline topology:", BASELINE, BaselineSubcommands.SET.text(), constistIds, + optional(CMD_AUTO_CONFIRMATION)); + Command.usage(logger, "Set baseline topology based on version:", BASELINE, + BaselineSubcommands.VERSION.text() + " topologyVersion", optional(CMD_AUTO_CONFIRMATION)); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + if (BaselineSubcommands.COLLECT != baselineArgs.getCmd()) + return "Warning: the command will perform changes in baseline."; + + return null; + } + + /** + * Change baseline. + * + * + * @param clientCfg Client configuration. + * @throws Exception If failed to execute baseline action. + */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, toVisorArguments(baselineArgs), clientCfg); + + baselinePrint0(res, logger); + } + catch (Throwable e) { + logger.severe("Failed to execute baseline command='" + baselineArgs.getCmd().text() + "'"); + logger.severe(CommandLogger.errorMessage(e)); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public BaselineArguments arg() { + return baselineArgs; + } + + /** + * Prepare task argument. + * + * @param args Argument from command line. 
+ * @return Task argument. + */ + private VisorBaselineTaskArg toVisorArguments(BaselineArguments args) { + return new VisorBaselineTaskArg(args.getCmd().visorBaselineOperation(), args.getTopVer(), args.getConsistentIds()); + } + + /** + * Print baseline topology. + * + * @param res Task result with baseline topology. + */ + private void baselinePrint0(VisorBaselineTaskResult res, Logger logger) { + logger.info("Cluster state: " + (res.isActive() ? "active" : "inactive")); + logger.info("Current topology version: " + res.getTopologyVersion()); + + logger.info(""); + + Map baseline = res.getBaseline(); + + Map srvs = res.getServers(); + + // if task runs on a node with VisorBaselineNode of old version (V1) we'll get order=null for all nodes. + + String crdStr = srvs.values().stream() + // check for not null + .filter(node -> node.getOrder() != null) + .min(Comparator.comparing(VisorBaselineNode::getOrder)) + // format + .map(crd -> " (Coordinator: ConsistentId=" + crd.getConsistentId() + ", Order=" + crd.getOrder() + ")") + .orElse(""); + + logger.info("Current topology version: " + res.getTopologyVersion() + crdStr); + logger.info(""); + + if (F.isEmpty(baseline)) + logger.info("Baseline nodes not found."); + else { + logger.info("Baseline nodes:"); + + for (VisorBaselineNode node : baseline.values()) { + VisorBaselineNode srvNode = srvs.get(node.getConsistentId()); + + String state = ", State=" + (srvNode != null ? "ONLINE" : "OFFLINE"); + + String order = srvNode != null ? 
", Order=" + srvNode.getOrder() : ""; + + logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + state + order); + } + + logger.info(DELIM); + logger.info("Number of baseline nodes: " + baseline.size()); + + logger.info(""); + + List others = new ArrayList<>(); + + for (VisorBaselineNode node : srvs.values()) { + if (!baseline.containsKey(node.getConsistentId())) + others.add(node); + } + + if (F.isEmpty(others)) + logger.info("Other nodes not found."); + else { + logger.info("Other nodes:"); + + for (VisorBaselineNode node : others) + logger.info(DOUBLE_INDENT + "ConsistentId=" + node.getConsistentId() + ", Order=" + node.getOrder()); + + logger.info("Number of other nodes: " + others.size()); + } + } + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + if (!argIter.hasNextSubArg()) { + this.baselineArgs = new BaselineArguments.Builder(BaselineSubcommands.COLLECT).build(); + + return; + } + + BaselineSubcommands cmd = of(argIter.nextArg("Expected baseline action")); + + if (cmd == null) + throw new IllegalArgumentException("Expected correct baseline action"); + + BaselineArguments.Builder baselineArgs = new BaselineArguments.Builder(cmd); + + switch (cmd) { + case ADD: + case REMOVE: + case SET: + Set ids = argIter.nextStringSet("list of consistent ids"); + + if (F.isEmpty(ids)) + throw new IllegalArgumentException("Empty list of consistent IDs"); + + baselineArgs.withConsistentIds(new ArrayList<>(ids)); + + break; + + case VERSION: + baselineArgs.withTopVer(argIter.nextLongArg("topology version")); + + break; + } + + this.baselineArgs = baselineArgs.build(); + } + + /** {@inheritDoc} */ + @Override public String name() { + return BASELINE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java index e73a24fe104ba..c1f382e98554e 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/Command.java @@ -17,49 +17,92 @@ package org.apache.ignite.internal.commandline; -/** - * Command. - */ -public enum Command { - /** */ - ACTIVATE("--activate"), +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientFactory; - /** */ - DEACTIVATE("--deactivate"), +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; - /** */ - STATE("--state"), +/** + * Abstract class for all control.sh commands, has already implemented methods and abstract methods. + * Define flow how to work with command. + * + * @param Generic for getArg method which should return command-specific paramters which it would be run with. + */ +public interface Command { + /** + * Method to create thin client for communication with cluster. + * + * @param clientCfg Thin client configuration. + * @return Grid thin client instance which is already connected to cluster. + * @throws Exception If error occur. + */ + public static GridClient startClient(GridClientConfiguration clientCfg) throws Exception { + GridClient client = GridClientFactory.start(clientCfg); - /** */ - BASELINE("--baseline"); + // If connection is unsuccessful, fail before doing any operations: + if (!client.connected()) + client.throwLastError(); - /** */ - private final String text; + return client; + } /** - * @param text Text. + * Print command usage. + * + * @param logger Logger to use. + * @param desc Command description. + * @param args Arguments. 
*/ - Command(String text) { - this.text = text; + public static void usage(Logger logger, String desc, CommandList cmd, String... args) { + logger.info(INDENT + desc); + logger.info(DOUBLE_INDENT + CommandLogger.join(" ", UTILITY_NAME, cmd, CommandLogger.join(" ", args))); + logger.info(""); } /** - * @param text Command text. - * @return Command for the text. + * Actual command execution. Parameters for run should be already set by calling parseArguments method. + * + * @param clientCfg Thin client configuration if connection to cluster is necessary. + * @param logger Logger to use. + * @return Result of operation (mostly usable for tests). + * @throws Exception If error occur. */ - public static Command of(String text) { - for (Command cmd : Command.values()) { - if (cmd.text().equalsIgnoreCase(text)) - return cmd; - } + public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception; + /** + * @return Message text to show user for. If null it means that confirmantion is not needed. + */ + public default String confirmationPrompt() { return null; } /** - * @return Command text. + * Parse command-specific arguments. + * + * @param argIterator Argument iterator. */ - public String text() { - return text; + public default void parseArguments(CommandArgIterator argIterator) { + //Empty block. } + + /** + * @return Command arguments which were parsed during {@link #parseArguments(CommandArgIterator)} call. + */ + public T arg(); + + /** + * Print info for user about command (parameters, use cases and so on). + * + * @param logger Logger to use. + */ + public void printUsage(Logger logger); + + /** + * @return command name. 
+ */ + String name(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java new file mode 100644 index 0000000000000..ae36596a59df7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandArgIterator.java @@ -0,0 +1,154 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline; + +import java.util.HashSet; +import java.util.Iterator; +import java.util.Set; +import org.apache.ignite.internal.util.typedef.F; +import org.jetbrains.annotations.NotNull; + +/** + * Iterator over command arguments. + */ +public class CommandArgIterator { + /** */ + private Iterator argsIt; + + /** */ + private String peekedArg; + + /** + * Set of common arguments names and high level command name set. + */ + private final Set commonArgumentsAndHighLevelCommandSet; + + /** + * @param argsIt Raw argument iterator. + * @param commonArgumentsAndHighLevelCommandSet All known subcomands. 
+ */ + public CommandArgIterator(Iterator argsIt, Set commonArgumentsAndHighLevelCommandSet) { + this.argsIt = argsIt; + this.commonArgumentsAndHighLevelCommandSet = commonArgumentsAndHighLevelCommandSet; + } + + /** + * @return Returns {@code true} if the iteration has more elements. + */ + public boolean hasNextArg() { + return peekedArg != null || argsIt.hasNext(); + } + + /** + * @return true if there's next argument for subcommand. + */ + public boolean hasNextSubArg() { + return hasNextArg() && CommandList.of(peekNextArg()) == null && + !commonArgumentsAndHighLevelCommandSet.contains(peekNextArg()); + } + + /** + * Extract next argument. + * + * @param err Error message. + * @return Next argument value. + */ + public String nextArg(String err) { + if (peekedArg != null) { + String res = peekedArg; + + peekedArg = null; + + return res; + } + + if (argsIt.hasNext()) + return argsIt.next(); + + throw new IllegalArgumentException(err); + } + + /** + * Returns the next argument in the iteration, without advancing the iteration. + * + * @return Next argument value or {@code null} if no next argument. + */ + public String peekNextArg() { + if (peekedArg == null && argsIt.hasNext()) + peekedArg = argsIt.next(); + + return peekedArg; + } + + /** + * @return Numeric value. + */ + public long nextLongArg(String argName) { + String str = nextArg("Expecting " + argName); + + try { + long val = Long.parseLong(str); + + if (val < 0) + throw new IllegalArgumentException("Invalid value for " + argName + ": " + val); + + return val; + } + catch (NumberFormatException ignored) { + throw new IllegalArgumentException("Invalid value for " + argName + ": " + str); + } + } + + /** + * @param argName Name of argument. + */ + public Set nextStringSet(String argName) { + String string = nextArg("Expected " + argName); + + return parseStringSet(string); + } + + /** + * + * @param string To scan on for string set. + * @return Set of string parsed from string param. 
+ */ + @NotNull public Set parseStringSet(String string) { + Set namesSet = new HashSet<>(); + + for (String name : string.split(",")) { + if (F.isEmpty(name)) + throw new IllegalArgumentException("Non-empty string expected."); + + namesSet.add(name.trim()); + } + return namesSet; + } + + /** + * Check if raw arg is command or option. + * + * @return {@code true} If raw arg is command, overwise {@code false}. + */ + public static boolean isCommandOrOption(String raw) { + return raw != null && raw.contains("--"); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java index 5993f593c7342..b95e0dd086430 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandHandler.java @@ -17,804 +17,673 @@ package org.apache.ignite.internal.commandline; -import java.util.ArrayList; +import java.io.File; +import java.time.Duration; +import java.time.LocalDateTime; import java.util.Arrays; import java.util.Collections; -import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Scanner; -import org.apache.ignite.internal.client.GridClient; +import java.util.UUID; +import java.util.logging.FileHandler; +import java.util.logging.Formatter; +import java.util.logging.Handler; +import java.util.logging.Level; +import java.util.logging.LogRecord; +import java.util.logging.Logger; +import java.util.logging.StreamHandler; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.client.GridClientAuthenticationException; import org.apache.ignite.internal.client.GridClientClosedException; -import org.apache.ignite.internal.client.GridClientClusterState; -import org.apache.ignite.internal.client.GridClientCompute; import 
org.apache.ignite.internal.client.GridClientConfiguration; import org.apache.ignite.internal.client.GridClientDisconnectedException; -import org.apache.ignite.internal.client.GridClientException; -import org.apache.ignite.internal.client.GridClientFactory; import org.apache.ignite.internal.client.GridClientHandshakeException; -import org.apache.ignite.internal.client.GridClientNode; import org.apache.ignite.internal.client.GridServerUnreachableException; import org.apache.ignite.internal.client.impl.connection.GridClientConnectionResetException; +import org.apache.ignite.internal.client.ssl.GridSslBasicContextFactory; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.X; -import org.apache.ignite.internal.visor.VisorTaskArgument; -import org.apache.ignite.internal.visor.baseline.VisorBaselineNode; -import org.apache.ignite.internal.visor.baseline.VisorBaselineOperation; -import org.apache.ignite.internal.visor.baseline.VisorBaselineTask; -import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskArg; -import org.apache.ignite.internal.visor.baseline.VisorBaselineTaskResult; +import org.apache.ignite.internal.util.typedef.internal.SB; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.logger.java.JavaLoggerFileHandler; +import org.apache.ignite.logger.java.JavaLoggerFormatter; import org.apache.ignite.plugin.security.SecurityCredentials; import org.apache.ignite.plugin.security.SecurityCredentialsBasicProvider; +import org.apache.ignite.plugin.security.SecurityCredentialsProvider; +import org.apache.ignite.ssl.SslContextFactory; import org.jetbrains.annotations.NotNull; +import org.jetbrains.annotations.Nullable; import static org.apache.ignite.internal.IgniteVersionUtils.ACK_VER_STR; import static org.apache.ignite.internal.IgniteVersionUtils.COPYRIGHT; -import static org.apache.ignite.internal.commandline.Command.ACTIVATE; -import static 
org.apache.ignite.internal.commandline.Command.BASELINE; -import static org.apache.ignite.internal.commandline.Command.DEACTIVATE; -import static org.apache.ignite.internal.commandline.Command.STATE; -import static org.apache.ignite.internal.visor.baseline.VisorBaselineOperation.ADD; -import static org.apache.ignite.internal.visor.baseline.VisorBaselineOperation.COLLECT; -import static org.apache.ignite.internal.visor.baseline.VisorBaselineOperation.REMOVE; -import static org.apache.ignite.internal.visor.baseline.VisorBaselineOperation.SET; -import static org.apache.ignite.internal.visor.baseline.VisorBaselineOperation.VERSION; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; +import static org.apache.ignite.internal.commandline.CommonArgParser.getCommonOptions; +import static org.apache.ignite.internal.commandline.TaskExecutor.DFLT_HOST; +import static org.apache.ignite.internal.commandline.TaskExecutor.DFLT_PORT; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; /** * Class that execute several commands passed via command line. 
*/ public class CommandHandler { /** */ - static final String DFLT_HOST = "127.0.0.1"; + static final String CMD_HELP = "--help"; /** */ - static final String DFLT_PORT = "11211"; + public static final String CONFIRM_MSG = "y"; /** */ - private static final String CMD_HELP = "--help"; + static final String DELIM = "--------------------------------------------------------------------------------"; /** */ - private static final String CMD_HOST = "--host"; + public static final int EXIT_CODE_OK = 0; /** */ - private static final String CMD_PORT = "--port"; + public static final int EXIT_CODE_INVALID_ARGUMENTS = 1; /** */ - private static final String CMD_PASSWORD = "--password"; + public static final int EXIT_CODE_CONNECTION_FAILED = 2; /** */ - private static final String CMD_USER = "--user"; + public static final int ERR_AUTHENTICATION_FAILED = 3; /** */ - private static final String BASELINE_ADD = "add"; + public static final int EXIT_CODE_UNEXPECTED_ERROR = 4; /** */ - private static final String BASELINE_REMOVE = "remove"; + private static final long DFLT_PING_INTERVAL = 5000L; /** */ - private static final String BASELINE_COLLECT = "collect"; + private static final long DFLT_PING_TIMEOUT = 30_000L; /** */ - private static final String BASELINE_SET = "set"; + private static final Scanner IN = new Scanner(System.in); - /** */ - private static final String BASELINE_SET_VERSION = "version"; + /** Utility name. */ + public static final String UTILITY_NAME = "control.(sh|bat)"; /** */ - private static final String DELIM = "--------------------------------------------------------------------------------"; + public static final String NULL = "null"; - /** Force option is used for auto confirmation. */ - private static final String CMD_FORCE = "--force"; + /** JULs logger. */ + private final Logger logger; - /** */ - public static final int EXIT_CODE_OK = 0; + /** Session. 
*/ + protected final String ses = U.id8(UUID.randomUUID()); - /** */ - public static final int EXIT_CODE_INVALID_ARGUMENTS = 1; - - /** */ - public static final int EXIT_CODE_CONNECTION_FAILED = 2; + /** Console instance. Public access needs for tests. */ + public GridConsole console = GridConsoleAdapter.getInstance(); /** */ - public static final int ERR_AUTHENTICATION_FAILED = 3; - - /** */ - public static final int EXIT_CODE_UNEXPECTED_ERROR = 4; - - /** */ - private static final Scanner IN = new Scanner(System.in); - - /** */ - private Iterator argsIt; - - /** */ - private String peekedArg; + private Object lastOperationRes; /** - * Output specified string to console. - * - * @param s String to output. - */ - private void log(String s) { - System.out.println(s); - } - - /** - * Provides a prompt, then reads a single line of text from the console. - * - * @param prompt text - * @return A string containing the line read from the console + * @param args Arguments to parse and apply. */ - private String readLine(String prompt) { - System.out.print(prompt); - - return IN.nextLine(); - } + public static void main(String[] args) { + CommandHandler hnd = new CommandHandler(); - /** - * Output empty line. - */ - private void nl() { - System.out.println(""); + System.exit(hnd.execute(Arrays.asList(args))); } /** - * Print error to console. - * - * @param errCode Error code to return. - * @param s Optional message. - * @param e Error to print. + * @return prepared JULs logger. */ - private int error(int errCode, String s, Throwable e) { - if (!F.isEmpty(s)) - log(s); + private Logger setupJavaLogger() { + Logger result = initLogger(CommandHandler.class.getName() + "Log"); - String msg = e.getMessage(); + // Adding logging to file. 
+ try { + String absPathPattern = new File(JavaLoggerFileHandler.logDirectory(U.defaultWorkDirectory()), "control-utility-%g.log").getAbsolutePath(); - if (F.isEmpty(msg)) - msg = e.getClass().getName(); + FileHandler fileHandler = new FileHandler(absPathPattern, 5 * 1024 * 1024, 5); - if (msg.startsWith("Failed to handle request")) { - int p = msg.indexOf("err="); + fileHandler.setFormatter(new JavaLoggerFormatter()); - msg = msg.substring(p + 4, msg.length() - 1); + result.addHandler(fileHandler); + } + catch (Exception e) { + System.out.println("Failed to configure logging to file"); } - log("Error: " + msg); + // Adding logging to console. + result.addHandler(setupStreamHandler()); - return errCode; + return result; } /** - * Requests interactive user confirmation if forthcoming operation is dangerous. - * - * @param args Arguments. - * @return {@code true} if operation confirmed (or not needed), {@code false} otherwise. + * @return StreamHandler with empty formatting */ - private boolean confirm(Arguments args) { - String prompt = confirmationPrompt(args); - - if (prompt == null) - return true; - - return "y".equalsIgnoreCase(readLine(prompt)); + public static StreamHandler setupStreamHandler() { + return new StreamHandler(System.out, new Formatter() { + @Override public String format(LogRecord record) { + return record.getMessage() + "\n"; + } + }); } /** - * @param args Arguments. - * @return Prompt text if confirmation needed, otherwise {@code null}. + * Initialises JULs logger with basic settings + * @param loggerName logger name. If {@code null} anonymous logger is returned. 
+ * @return logger */ - private String confirmationPrompt(Arguments args) { - if (args.force()) - return null; - - String str = null; + public static Logger initLogger(@Nullable String loggerName) { + Logger result; - switch (args.command()) { - case DEACTIVATE: - str = "Warning: the command will deactivate a cluster."; - break; + if (loggerName == null) + result = Logger.getAnonymousLogger(); + else + result = Logger.getLogger(loggerName); - case BASELINE: - if (!BASELINE_COLLECT.equals(args.baselineAction())) - str = "Warning: the command will perform changes in baseline."; - } + result.setLevel(Level.INFO); + result.setUseParentHandlers(false); - return str == null ? null : str + "\nPress 'y' to continue..."; + return result; } /** - * @param rawArgs Arguments. + * */ - private void initArgIterator(List rawArgs) { - argsIt = rawArgs.iterator(); - peekedArg = null; + public CommandHandler() { + logger = setupJavaLogger(); } /** - * @return Returns {@code true} if the iteration has more elements. + * @param logger Logger to use. */ - private boolean hasNextArg() { - return peekedArg != null || argsIt.hasNext(); + public CommandHandler(Logger logger) { + this.logger = logger; } /** - * Activate cluster. + * Parse and execute command. * - * @param client Client. - * @throws GridClientException If failed to activate. + * @param rawArgs Arguments to parse and execute. + * @return Exit code. */ - private void activate(GridClient client) throws Throwable { - try { - GridClientClusterState state = client.state(); + public int execute(List rawArgs) { + LocalDateTime startTime = LocalDateTime.now(); - state.active(true); + Thread.currentThread().setName("session=" + ses); - log("Cluster activated"); - } - catch (Throwable e) { - log("Failed to activate cluster."); + logger.info("Control utility [ver. 
" + ACK_VER_STR + "]"); + logger.info(COPYRIGHT); + logger.info("User: " + System.getProperty("user.name")); + logger.info("Time: " + startTime); - throw e; - } - } + String commandName = ""; - /** - * Deactivate cluster. - * - * @param client Client. - * @throws Throwable If failed to deactivate. - */ - private void deactivate(GridClient client) throws Throwable { try { - GridClientClusterState state = client.state(); + if (F.isEmpty(rawArgs) || (rawArgs.size() == 1 && CMD_HELP.equalsIgnoreCase(rawArgs.get(0)))) { + printHelp(); - state.active(false); + return EXIT_CODE_OK; + } - log("Cluster deactivated"); - } - catch (Throwable e) { - log("Failed to deactivate cluster."); + ConnectionAndSslParameters args = new CommonArgParser(logger).parseAndValidate(rawArgs.iterator()); - throw e; - } - } + Command command = args.command(); + commandName = command.name(); - /** - * Print cluster state. - * - * @param client Client. - * @throws Throwable If failed to print state. - */ - private void state(GridClient client) throws Throwable { - try { - GridClientClusterState state = client.state(); + if (!args.autoConfirmation() && !confirm(command.confirmationPrompt())) { + logger.info("Operation cancelled."); - log("Cluster is " + (state.active() ? "active" : "inactive")); - } - catch (Throwable e) { - log("Failed to get cluster state."); + return EXIT_CODE_OK; + } - throw e; - } - } + boolean tryConnectAgain = true; - /** - * - * @param client Client - * @return Task result. - * @throws GridClientException If failed to execute task. 
- */ - private R executeTask(GridClient client, Class taskCls, Object taskArgs) throws GridClientException { - GridClientCompute compute = client.compute(); + int tryConnectMaxCount = 3; - List nodes = new ArrayList<>(); + boolean suppliedAuth = !F.isEmpty(args.userName()) && !F.isEmpty(args.password()); - for (GridClientNode node : compute.nodes()) - if (node.connectable()) - nodes.add(node); + GridClientConfiguration clientCfg = getClientConfiguration(args); - if (F.isEmpty(nodes)) - throw new GridClientDisconnectedException("Connectable node not found", null); + while (tryConnectAgain) { + tryConnectAgain = false; - GridClientNode node = compute.balancer().balancedNode(nodes); + try { + logger.info("Command [" + commandName + "] started"); + logger.info("Arguments: " + argumentsToString(rawArgs)); - return compute.projection(node).execute(taskCls.getName(), - new VisorTaskArgument<>(node.nodeId(), taskArgs, false)); - } + logger.info(DELIM); + lastOperationRes = command.execute(clientCfg, logger); + } + catch (Throwable e) { + if (tryConnectMaxCount > 0 && isAuthError(e)) { + logger.info(suppliedAuth ? + "Authentication error, please try again." : + "This cluster requires authentication."); - /** - * Change baseline. - * - * @param client Client. - * @param baselineAct Baseline action to execute. @throws GridClientException If failed to execute baseline action. - * @param baselineArgs Baseline action arguments. - * @throws Throwable If failed to execute baseline action. 
- */ - private void baseline(GridClient client, String baselineAct, String baselineArgs) throws Throwable { - switch (baselineAct) { - case BASELINE_ADD: - baselineAdd(client, baselineArgs); - break; - - case BASELINE_REMOVE: - baselineRemove(client, baselineArgs); - break; - - case BASELINE_SET: - baselineSet(client, baselineArgs); - break; - - case BASELINE_SET_VERSION: - baselineVersion(client, baselineArgs); - break; - - case BASELINE_COLLECT: - baselinePrint(client); - break; - } - } + String user = clientCfg.getSecurityCredentialsProvider() == null ? + requestDataFromConsole("user: ") : + (String)clientCfg.getSecurityCredentialsProvider().credentials().getLogin(); - /** - * Prepare task argument. - * - * @param op Operation. - * @param s Argument from command line. - * @return Task argument. - */ - private VisorBaselineTaskArg arg(VisorBaselineOperation op, String s) { - switch (op) { - case ADD: - case REMOVE: - case SET: - if(F.isEmpty(s)) - throw new IllegalArgumentException("Empty list of consistent IDs"); + clientCfg = getClientConfiguration(user, new String(requestPasswordFromConsole("password: ")), args); - List consistentIds = new ArrayList<>(); + tryConnectAgain = true; - for (String consistentId : s.split(",")) - consistentIds.add(consistentId.trim()); + suppliedAuth = true; - return new VisorBaselineTaskArg(op, -1, consistentIds); - - case VERSION: - try { - long topVer = Long.parseLong(s); + tryConnectMaxCount--; + } + else { + if (tryConnectMaxCount == 0) + throw new GridClientAuthenticationException("Authentication error, maximum number of " + + "retries exceeded"); - return new VisorBaselineTaskArg(op, topVer, null); - } - catch (NumberFormatException e) { - throw new IllegalArgumentException("Invalid topology version: " + s, e); + throw e; + } } + } - default: - return new VisorBaselineTaskArg(op, -1, null); + logger.info("Command [" + commandName + "] finished with code: " + EXIT_CODE_OK); + return EXIT_CODE_OK; } - } + catch 
(IllegalArgumentException e) { + logger.severe("Check arguments. " + CommandLogger.errorMessage(e)); + logger.info("Command [" + commandName + "] finished with code: " + EXIT_CODE_INVALID_ARGUMENTS); - /** - * Print baseline topology. - * - * @param res Task result with baseline topology. - */ - private void baselinePrint0(VisorBaselineTaskResult res) { - log("Cluster state: " + (res.isActive() ? "active" : "inactive")); - log("Current topology version: " + res.getTopologyVersion()); - nl(); + return EXIT_CODE_INVALID_ARGUMENTS; + } + catch (Throwable e) { + if (isAuthError(e)) { + logger.severe("Authentication error. " + CommandLogger.errorMessage(e)); + logger.info("Command [" + commandName + "] finished with code: " + ERR_AUTHENTICATION_FAILED); - Map baseline = res.getBaseline(); - Map servers = res.getServers(); + return ERR_AUTHENTICATION_FAILED; + } - if (F.isEmpty(baseline)) - log("Baseline nodes not found."); - else { - log("Baseline nodes:"); + if (isConnectionError(e)) { + IgniteCheckedException cause = X.cause(e, IgniteCheckedException.class); - for(VisorBaselineNode node : baseline.values()) { - log(" ConsistentID=" + node.getConsistentId() + ", STATE=" + - (servers.containsKey(node.getConsistentId()) ? "ONLINE" : "OFFLINE")); - } + if (isConnectionClosedSilentlyException(e)) + logger.severe("Connection to cluster failed. Please check firewall settings and " + + "client and server are using the same SSL configuration."); + else { + if (isSSLMisconfigurationError(cause)) + e = cause; - log(DELIM); - log("Number of baseline nodes: " + baseline.size()); + logger.severe("Connection to cluster failed. 
" + CommandLogger.errorMessage(e)); - nl(); + } - List others = new ArrayList<>(); + logger.info("Command [" + commandName + "] finished with code: " + EXIT_CODE_CONNECTION_FAILED); - for (VisorBaselineNode node : servers.values()) { - if (!baseline.containsKey(node.getConsistentId())) - others.add(node); + return EXIT_CODE_CONNECTION_FAILED; } - if (F.isEmpty(others)) - log("Other nodes not found."); - else { - log("Other nodes:"); + logger.severe(CommandLogger.errorMessage(e)); + logger.info("Command [" + commandName + "] finished with code: " + EXIT_CODE_UNEXPECTED_ERROR); + + return EXIT_CODE_UNEXPECTED_ERROR; + } + finally { + LocalDateTime endTime = LocalDateTime.now(); - for(VisorBaselineNode node : others) - log(" ConsistentID=" + node.getConsistentId()); + Duration diff = Duration.between(startTime, endTime); - log("Number of other nodes: " + others.size()); - } + logger.info("Control utility has completed execution at: " + endTime); + logger.info("Execution time: " + diff.toMillis() + " ms"); + + Arrays.stream(logger.getHandlers()) + .filter(handler -> handler instanceof FileHandler) + .forEach(Handler::close); } } /** - * Print current baseline. + * Analyses passed exception to find out whether it is related to SSL misconfiguration issues. + * + * (!) Implementation depends heavily on structure of exception stack trace + * thus is very fragile to any changes in that structure. * - * @param client Client. + * @param e Exception to analyze. + * + * @return {@code True} if exception may be related to SSL misconfiguration issues. */ - private void baselinePrint(GridClient client) throws GridClientException { - VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, arg(COLLECT, "")); - - baselinePrint0(res); + private boolean isSSLMisconfigurationError(Throwable e) { + return e != null && e.getMessage() != null && e.getMessage().contains("SSL"); } /** - * Add nodes to baseline. 
+ * Analyses passed exception to find out whether it is caused by server closing connection silently. + * This happens when client tries to establish unprotected connection + * to the cluster supporting only secured communications (e.g. when server is configured to use SSL certificates + * and client is not). * - * @param client Client. - * @param baselineArgs Baseline action arguments. - * @throws Throwable If failed to add nodes to baseline. + * (!) Implementation depends heavily on structure of exception stack trace + * thus is very fragile to any changes in that structure. + * + * @param e Exception to analyse. + * @return {@code True} if exception may be related to the attempt to establish unprotected connection + * to secured cluster. */ - private void baselineAdd(GridClient client, String baselineArgs) throws Throwable { - try { - VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, arg(ADD, baselineArgs)); + private boolean isConnectionClosedSilentlyException(Throwable e) { + if (!(e instanceof GridClientDisconnectedException)) + return false; - baselinePrint0(res); - } - catch (Throwable e) { - log("Failed to add nodes to baseline."); + Throwable cause = e.getCause(); - throw e; - } - } + if (cause == null) + return false; - /** - * Remove nodes from baseline. - * - * @param client Client. - * @param consistentIds Consistent IDs. - * @throws Throwable If failed to remove nodes from baseline. 
- */ - private void baselineRemove(GridClient client, String consistentIds) throws Throwable { - try { - VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, arg(REMOVE, consistentIds)); + cause = cause.getCause(); - baselinePrint0(res); - } - catch (Throwable e) { - log("Failed to remove nodes from baseline."); + if (cause instanceof GridClientConnectionResetException && + cause.getMessage() != null && + cause.getMessage().contains("Failed to perform handshake") + ) + return true; - throw e; - } + return false; } /** - * Set baseline. - * - * @param client Client. - * @param consistentIds Consistent IDs. - * @throws Throwable If failed to set baseline. + * @param rawArgs Arguments which user has provided. + * @return String which could be shown in console and pritned to log. */ - private void baselineSet(GridClient client, String consistentIds) throws Throwable { - try { - VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, arg(SET, consistentIds)); + private String argumentsToString(List rawArgs) { + boolean hide = false; - baselinePrint0(res); - } - catch (Throwable e) { - log("Failed to set baseline."); + SB sb = new SB(); - throw e; - } - } + for (int i = 0; i < rawArgs.size(); i++) { + if (hide) { + sb.a("***** "); - /** - * Set baseline by topology version. - * - * @param client Client. - * @param arg Argument from command line. - */ - private void baselineVersion(GridClient client, String arg) throws GridClientException { - try { - VisorBaselineTaskResult res = executeTask(client, VisorBaselineTask.class, arg(VERSION, arg)); + hide = false; - baselinePrint0(res); - } - catch (Throwable e) { - log("Failed to set baseline with specified topology version."); + continue; + } + + String arg = rawArgs.get(i); + + sb.a(arg).a(' '); - throw e; + hide = CommonArgParser.isSensitiveArgument(arg); } - } - /** - * @param e Exception to check. 
- * @return {@code true} if specified exception is {@link GridClientAuthenticationException}. - */ - private boolean isAuthError(Throwable e) { - return X.hasCause(e, GridClientAuthenticationException.class); + return sb.toString(); } /** - * @param e Exception to check. - * @return {@code true} if specified exception is a connection error. + * Does one of three things: + *
    + *
  • returns user name from connection parameters if it is there;
  • + *
  • returns user name from client configuration if it is there;
  • + *
  • requests user input and returns entered name.
  • + *
+ * + * @param args Connection parameters. + * @param clientCfg Client configuration. + * @throws IgniteCheckedException If security credetials cannot be provided from client configuration. */ - private boolean isConnectionError(Throwable e) { - return e instanceof GridClientClosedException || - e instanceof GridClientConnectionResetException || - e instanceof GridClientDisconnectedException || - e instanceof GridClientHandshakeException || - e instanceof GridServerUnreachableException; + private String retrieveUserName( + ConnectionAndSslParameters args, + GridClientConfiguration clientCfg + ) throws IgniteCheckedException { + if (!F.isEmpty(args.userName())) + return args.userName(); + else if (clientCfg.getSecurityCredentialsProvider() == null) + return requestDataFromConsole("user: "); + else + return (String)clientCfg.getSecurityCredentialsProvider().credentials().getLogin(); } /** - * Print command usage. - * - * @param desc Command description. - * @param args Arguments. + * @param args Common arguments. + * @return Thin client configuration to connect to cluster. + * @throws IgniteCheckedException If error occur. */ - private void usage(String desc, Command cmd, String... args) { - log(desc); - log(" control.sh [--host HOST_OR_IP] [--port PORT] [--user USER] [--password PASSWORD] " + cmd.text() + String.join("", args)); - nl(); + @NotNull private GridClientConfiguration getClientConfiguration( + ConnectionAndSslParameters args + ) throws IgniteCheckedException { + return getClientConfiguration(args.userName(), args.password(), args); } /** - * Extract next argument. - * - * @param err Error message. - * @return Next argument value. + * @param userName User name for authorization. + * @param password Password for authorization. + * @param args Common arguments. + * @return Thin client configuration to connect to cluster. + * @throws IgniteCheckedException If error occur. 
*/ - private String nextArg(String err) { - if (peekedArg != null) { - String res = peekedArg; + @NotNull private GridClientConfiguration getClientConfiguration( + String userName, + String password, + ConnectionAndSslParameters args + ) throws IgniteCheckedException { + GridClientConfiguration clientCfg = new GridClientConfiguration(); - peekedArg = null; + clientCfg.setPingInterval(args.pingInterval()); - return res; - } + clientCfg.setPingTimeout(args.pingTimeout()); - if (argsIt.hasNext()) - return argsIt.next(); + clientCfg.setServers(Collections.singletonList(args.host() + ":" + args.port())); - throw new IllegalArgumentException(err); - } + if (!F.isEmpty(userName)) + clientCfg.setSecurityCredentialsProvider(getSecurityCredentialsProvider(userName, password, clientCfg)); - /** - * Returns the next argument in the iteration, without advancing the iteration. - * - * @return Next argument value or {@code null} if no next argument. - */ - private String peekNextArg() { - if (peekedArg == null && argsIt.hasNext()) - peekedArg = argsIt.next(); + if (!F.isEmpty(args.sslKeyStorePath())) + clientCfg.setSslContextFactory(createSslSupportFactory(args)); - return peekedArg; + return clientCfg; } /** - * Parses and validates arguments. - * - * @param rawArgs Array of arguments. - * @return Arguments bean. - * @throws IllegalArgumentException In case arguments aren't valid. + * @param userName User name for authorization. + * @param password Password for authorization. + * @param clientCfg Thin client configuration to connect to cluster. + * @return Security credentials provider with usage of given user name and password. + * @throws IgniteCheckedException If error occur. 
*/ - @NotNull Arguments parseAndValidate(List rawArgs) { - String host = DFLT_HOST; - - String port = DFLT_PORT; - - String user = null; + @NotNull private SecurityCredentialsProvider getSecurityCredentialsProvider( + String userName, + String password, + GridClientConfiguration clientCfg + ) throws IgniteCheckedException { + SecurityCredentialsProvider securityCredential = clientCfg.getSecurityCredentialsProvider(); - String pwd = null; + if (securityCredential == null) + return new SecurityCredentialsBasicProvider(new SecurityCredentials(userName, password)); - String baselineAct = ""; + final SecurityCredentials credential = securityCredential.credentials(); + credential.setLogin(userName); + credential.setPassword(password); - String baselineArgs = ""; + return securityCredential; + } - boolean force = false; + /** + * @param args Commond args. + * @return Ssl support factory. + */ + @NotNull private GridSslBasicContextFactory createSslSupportFactory(ConnectionAndSslParameters args) { + GridSslBasicContextFactory factory = new GridSslBasicContextFactory(); - List commands = new ArrayList<>(); + List sslProtocols = split(args.sslProtocol(), ","); - initArgIterator(rawArgs); + String sslProtocol = F.isEmpty(sslProtocols) ? 
DFLT_SSL_PROTOCOL : sslProtocols.get(0); - while (hasNextArg()) { - String str = nextArg("").toLowerCase(); + factory.setProtocol(sslProtocol); + factory.setKeyAlgorithm(args.sslKeyAlgorithm()); - Command cmd = Command.of(str); + if (sslProtocols.size() > 1) + factory.setProtocols(sslProtocols); - if (cmd != null) { - switch (cmd) { - case ACTIVATE: - case DEACTIVATE: - case STATE: - commands.add(Command.of(str)); - break; + factory.setCipherSuites(split(args.getSslCipherSuites(), ",")); - case BASELINE: - commands.add(BASELINE); + factory.setKeyStoreFilePath(args.sslKeyStorePath()); - baselineAct = BASELINE_COLLECT; //default baseline action + if (args.sslKeyStorePassword() != null) + factory.setKeyStorePassword(args.sslKeyStorePassword()); + else + factory.setKeyStorePassword(requestPasswordFromConsole("SSL keystore password: ")); - str = peekNextArg(); + factory.setKeyStoreType(args.sslKeyStoreType()); - if (str != null) { - str = str.toLowerCase(); + if (F.isEmpty(args.sslTrustStorePath())) + factory.setTrustManagers(GridSslBasicContextFactory.getDisabledTrustManager()); + else { + factory.setTrustStoreFilePath(args.sslTrustStorePath()); - if (BASELINE_ADD.equals(str) || BASELINE_REMOVE.equals(str) || - BASELINE_SET.equals(str) || BASELINE_SET_VERSION.equals(str)) { - baselineAct = nextArg("Expected baseline action"); + if (args.sslTrustStorePassword() != null) + factory.setTrustStorePassword(args.sslTrustStorePassword()); + else + factory.setTrustStorePassword(requestPasswordFromConsole("SSL truststore password: ")); - baselineArgs = nextArg("Expected baseline arguments"); - } - } - } - } - else { - switch (str) { - case CMD_HOST: - host = nextArg("Expected host name"); - break; - - case CMD_PORT: - port = nextArg("Expected port number"); - - try { - int p = Integer.parseInt(port); - - if (p <= 0 || p > 65535) - throw new IllegalArgumentException("Invalid value for port: " + port); - } - catch (NumberFormatException ignored) { - throw new 
IllegalArgumentException("Invalid value for port: " + port); - } - break; - - case CMD_USER: - user = nextArg("Expected user name"); - break; - - case CMD_PASSWORD: - pwd = nextArg("Expected password"); - break; - - case CMD_FORCE: - force = true; - break; - default: - throw new IllegalArgumentException("Unexpected argument: " + str); - } - } + factory.setTrustStoreType(args.sslTrustStoreType()); } - int sz = commands.size(); - - if (sz < 1) - throw new IllegalArgumentException("No action was specified"); - - if (sz > 1) - throw new IllegalArgumentException("Only one action can be specified, but found: " + sz); - - Command cmd = commands.get(0); - - boolean hasUsr = F.isEmpty(user); - boolean hasPwd = F.isEmpty(pwd); - - if (hasUsr != hasPwd) - throw new IllegalArgumentException("Both user and password should be specified"); - - return new Arguments(cmd, host, port, user, pwd, baselineAct, baselineArgs, force); + return factory; } /** - * Parse and execute command. + * Used for tests. * - * @param rawArgs Arguments to parse and execute. - * @return Exit code. + * @return Last operation result; */ - public int execute(List rawArgs) { - log("Control utility [ver. 
" + ACK_VER_STR + "]"); - log(COPYRIGHT); - log("User: " + System.getProperty("user.name")); - log(DELIM); - - try { - if (F.isEmpty(rawArgs) || (rawArgs.size() == 1 && CMD_HELP.equalsIgnoreCase(rawArgs.get(0)))) { - log("This utility can do the following commands:"); - - usage(" Activate cluster:", ACTIVATE); - usage(" Deactivate cluster:", DEACTIVATE, " [--force]"); - usage(" Print current cluster state:", STATE); - usage(" Print cluster baseline topology:", BASELINE); - usage(" Add nodes into baseline topology:", BASELINE, " add consistentId1[,consistentId2,....,consistentIdN] [--force]"); - usage(" Remove nodes from baseline topology:", BASELINE, " remove consistentId1[,consistentId2,....,consistentIdN] [--force]"); - usage(" Set baseline topology:", BASELINE, " set consistentId1[,consistentId2,....,consistentIdN] [--force]"); - usage(" Set baseline topology based on version:", BASELINE, " version topologyVersion [--force]"); - - log("By default cluster deactivation and changes in baseline topology commands request interactive confirmation. "); - log(" --force option can be used to execute commands without prompting for confirmation."); - nl(); - - log("Default values:"); - log(" HOST_OR_IP=" + DFLT_HOST); - log(" PORT=" + DFLT_PORT); - nl(); - - log("Exit codes:"); - log(" " + EXIT_CODE_OK + " - successful execution."); - log(" " + EXIT_CODE_INVALID_ARGUMENTS + " - invalid arguments."); - log(" " + EXIT_CODE_CONNECTION_FAILED + " - connection failed."); - log(" " + ERR_AUTHENTICATION_FAILED + " - authentication failed."); - log(" " + EXIT_CODE_UNEXPECTED_ERROR + " - unexpected error."); - - return EXIT_CODE_OK; - } - - Arguments args = parseAndValidate(rawArgs); - - if (!confirm(args)) { - log("Operation canceled."); + public T getLastOperationResult() { + return (T)lastOperationRes; + } - return EXIT_CODE_OK; - } + /** + * Provides a prompt, then reads a single line of text from the console. 
+ * + * @param prompt text + * @return A string containing the line read from the console + */ + private String readLine(String prompt) { + System.out.print(prompt); - GridClientConfiguration cfg = new GridClientConfiguration(); + return IN.nextLine(); + } - cfg.setServers(Collections.singletonList(args.host() + ":" + args.port())); - if (!F.isEmpty(args.user())) { - cfg.setSecurityCredentialsProvider( - new SecurityCredentialsBasicProvider(new SecurityCredentials(args.user(), args.password()))); - } + /** + * Requests interactive user confirmation if forthcoming operation is dangerous. + * + * @return {@code true} if operation confirmed (or not needed), {@code false} otherwise. + */ + private boolean confirm(String str) { + if (str == null) + return true; - try (GridClient client = GridClientFactory.start(cfg)) { + String prompt = str + "\nPress '" + CONFIRM_MSG + "' to continue . . . "; - switch (args.command()) { - case ACTIVATE: - activate(client); - break; + return CONFIRM_MSG.equalsIgnoreCase(readLine(prompt)); + } - case DEACTIVATE: - deactivate(client); - break; + /** + * @param e Exception to check. + * @return {@code true} if specified exception is {@link GridClientAuthenticationException}. + */ + private static boolean isAuthError(Throwable e) { + return X.hasCause(e, GridClientAuthenticationException.class); + } - case STATE: - state(client); - break; + /** + * @param e Exception to check. + * @return {@code true} if specified exception is a connection error. + */ + private static boolean isConnectionError(Throwable e) { + return e instanceof GridClientClosedException || + e instanceof GridClientConnectionResetException || + e instanceof GridClientDisconnectedException || + e instanceof GridClientHandshakeException || + e instanceof GridServerUnreachableException; + } - case BASELINE: - baseline(client, args.baselineAction(), args.baselineArguments()); - break; - } - } + /** + * Requests password from console with message. + * + * @param msg Message. 
+ * @return Password. + */ + private char[] requestPasswordFromConsole(String msg) { + if (console == null) + throw new UnsupportedOperationException("Failed to securely read password (console is unavailable): " + msg); + else + return console.readPassword(msg); + } - return 0; - } - catch (IllegalArgumentException e) { - return error(EXIT_CODE_INVALID_ARGUMENTS, "Check arguments.", e); - } - catch (Throwable e) { - if (isAuthError(e)) - return error(ERR_AUTHENTICATION_FAILED, "Authentication error.", e); + /** + * Requests user data from console with message. + * + * @param msg Message. + * @return Input user data. + */ + private String requestDataFromConsole(String msg) { + if (console != null) + return console.readLine(msg); + else { + Scanner scanner = new Scanner(System.in); - if (isConnectionError(e)) - return error(EXIT_CODE_CONNECTION_FAILED, "Connection to cluster failed.", e); + logger.info(msg); - return error(EXIT_CODE_UNEXPECTED_ERROR, "", e); + return scanner.nextLine(); } } /** - * @param args Arguments to parse and apply. + * Split string into items. + * + * @param s String to process. + * @param delim Delimiter. + * @return List with items. */ - public static void main(String[] args) { - CommandHandler hnd = new CommandHandler(); + private static List split(String s, String delim) { + if (F.isEmpty(s)) + return Collections.emptyList(); + + return Arrays.stream(s.split(delim)) + .map(String::trim) + .filter(item -> !item.isEmpty()) + .collect(Collectors.toList()); + } - System.exit(hnd.execute(Arrays.asList(args))); + /** */ + private void printHelp() { + logger.info("Control utility script is used to execute admin commands on cluster or get common cluster info. 
" + + "The command has the following syntax:"); + logger.info(""); + + logger.info(INDENT + CommandLogger.join(" ", CommandLogger.join(" ", UTILITY_NAME, CommandLogger.join(" ", getCommonOptions())), + optional("command"), "")); + logger.info(""); + logger.info(""); + + logger.info("This utility can do the following commands:"); + + Arrays.stream(CommandList.values()).forEach(c -> c.command().printUsage(logger)); + + logger.info("By default commands affecting the cluster require interactive confirmation."); + logger.info("Use " + CMD_AUTO_CONFIRMATION + " option to disable it."); + logger.info(""); + + logger.info("Default values:"); + logger.info(DOUBLE_INDENT + "HOST_OR_IP=" + DFLT_HOST); + logger.info(DOUBLE_INDENT + "PORT=" + DFLT_PORT); + logger.info(DOUBLE_INDENT + "PING_INTERVAL=" + DFLT_PING_INTERVAL); + logger.info(DOUBLE_INDENT + "PING_TIMEOUT=" + DFLT_PING_TIMEOUT); + logger.info(DOUBLE_INDENT + "SSL_PROTOCOL=" + SslContextFactory.DFLT_SSL_PROTOCOL); + logger.info(DOUBLE_INDENT + "SSL_KEY_ALGORITHM=" + SslContextFactory.DFLT_KEY_ALGORITHM); + logger.info(DOUBLE_INDENT + "KEYSTORE_TYPE=" + SslContextFactory.DFLT_STORE_TYPE); + logger.info(DOUBLE_INDENT + "TRUSTSTORE_TYPE=" + SslContextFactory.DFLT_STORE_TYPE); + + logger.info(""); + + logger.info("Exit codes:"); + logger.info(DOUBLE_INDENT + EXIT_CODE_OK + " - successful execution."); + logger.info(DOUBLE_INDENT + EXIT_CODE_INVALID_ARGUMENTS + " - invalid arguments."); + logger.info(DOUBLE_INDENT + EXIT_CODE_CONNECTION_FAILED + " - connection failed."); + logger.info(DOUBLE_INDENT + ERR_AUTHENTICATION_FAILED + " - authentication failed."); + logger.info(DOUBLE_INDENT + EXIT_CODE_UNEXPECTED_ERROR + " - unexpected error."); } } - diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java new file mode 100644 index 0000000000000..5e5d4f0cb5a6c --- /dev/null +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandList.java @@ -0,0 +1,110 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline; + +import org.apache.ignite.internal.commandline.cache.CacheCommands; +import org.apache.ignite.internal.commandline.diagnostic.DiagnosticCommand; + +/** + * High-level commands. + */ +public enum CommandList { + /** */ + ACTIVATE("--activate", new ActivateCommand()), + + /** */ + DEACTIVATE("--deactivate", new DeactivateCommand()), + + /** */ + STATE("--state", new StateCommand()), + + /** */ + BASELINE("--baseline", new BaselineCommand()), + + /** */ + TX("--tx", new TxCommands()), + + /** */ + CACHE("--cache", new CacheCommands()), + + /** */ + WAL("--wal", new WalCommands()), + + /** */ + DIAGNOSTIC("--diagnostic", new DiagnosticCommand()); + + /** Private values copy so there's no need in cloning it every time. */ + private static final CommandList[] VALUES = CommandList.values(); + + /** */ + private final String text; + + /** Command implementation. */ + private final Command command; + + /** + * @param text Text. + * @param command Command implementation. 
+ */ + CommandList(String text, Command command) { + this.text = text; + this.command = command; + } + + /** + * @param text Command text. + * @return Command for the text. + */ + public static CommandList of(String text) { + for (CommandList cmd : VALUES) { + if (cmd.text().equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** + * @return Command text. + */ + public String text() { + return text; + } + + /** + * @return Command implementation. + */ + public Command command() { + return command; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return text; + } + + /** + * @return command name + */ + public String toCommandName() { + return text.substring(2).toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java new file mode 100644 index 0000000000000..64ae204f7b406 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommandLogger.java @@ -0,0 +1,148 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + + +package org.apache.ignite.internal.commandline; + +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.SB; + +/** + * Utility class for creating {@code CommangHandler} log messages. + */ +public class CommandLogger { + /** Indent for help output. */ + public static final String INDENT = " "; + + /** Double indent for help output. */ + public static final String DOUBLE_INDENT = INDENT + INDENT; + + /** + * Join input parameters with specified {@code delimeter} between them. + * + * @param delimeter Specified delimeter. + * @param params Other input parameter. + * @return Joined paramaters with specified {@code delimeter}. + */ + public static String join(String delimeter, Object... params) { + return join(new SB(), "", delimeter, params).toString(); + } + + /** + * Join input parameters with specified {@code delimeter} between them and append to the end {@code delimeter}. + * + * @param sb Specified string builder. + * @param sbDelimeter Delimeter between {@code sb} and appended {@code param}. + * @param delimeter Specified delimeter. + * @param params Other input parameter. + * @return SB with appended to the end joined paramaters with specified {@code delimeter}. + */ + public static SB join(SB sb, String sbDelimeter, String delimeter, Object... params) { + if (!F.isEmpty(params)) { + sb.a(sbDelimeter); + + for (Object par : params) + sb.a(par).a(delimeter); + + sb.setLength(sb.length() - delimeter.length()); + } + + return sb; + } + + + /** + * Join input parameters with space and wrap optional braces {@code []}. + * + * @param params Other input parameter. + * @return Joined parameters wrapped optional braces. + */ + public static String optional(Object... 
params) { + return join(new SB(), "[", " ", params).a("]").toString(); + } + + /** + * Concatenates input parameters to single string with OR delimiter {@code |}. + * + * @param params Remaining parameters. + * @return Concatenated string. + */ + public static String or(Object... params) { + return join("|", params); + } + + /** + * Join input parameters with space and wrap grouping braces {@code ()}. + * + * @param params Input parameter. + * @return Joined parameters wrapped grouped braces. + */ + public static String grouped(Object... params) { + return join(new SB(), "(", " ", params).a(")").toString(); + } + + /** + * Generates readable error message from exception + * @param e Exctption + * @return error message + */ + public static String errorMessage(Throwable e) { + String msg = e.getMessage(); + + if (F.isEmpty(msg)) + msg = e.getClass().getName(); + else if (msg.startsWith("Failed to handle request")) { + int p = msg.indexOf("err="); + + msg = msg.substring(p + 4, msg.length() - 1); + } + + return msg; + } + + /** + * Prints exception messages to log + * + * @param exceptions map containing node ids and exceptions + * @param infoMsg single message to log + * @param logger Logger to use + * @return true if errors were printed. 
+ */ + public static boolean printErrors(Map exceptions, String infoMsg, Logger logger) { + if (!F.isEmpty(exceptions)) { + logger.info(infoMsg); + + for (Map.Entry e : exceptions.entrySet()) { + logger.info(INDENT + "Node ID: " + e.getKey()); + + logger.info(INDENT + "Exception message:"); + logger.info(DOUBLE_INDENT + e.getValue().getMessage()); + logger.info(""); + } + + return true; + } + + return false; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java new file mode 100644 index 0000000000000..0867cd1761675 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/CommonArgParser.java @@ -0,0 +1,357 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Set; +import java.util.logging.Logger; +import org.apache.ignite.ssl.SslContextFactory; + +import static org.apache.ignite.internal.client.GridClientConfiguration.DFLT_PING_INTERVAL; +import static org.apache.ignite.internal.client.GridClientConfiguration.DFLT_PING_TIMEOUT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.TaskExecutor.DFLT_HOST; +import static org.apache.ignite.internal.commandline.TaskExecutor.DFLT_PORT; +import static org.apache.ignite.ssl.SslContextFactory.DFLT_SSL_PROTOCOL; + +/** + * Common argument parser. + * Also would parse high-level command and delegate parsing for its argument to the command. + */ +public class CommonArgParser { + /** */ + private final Logger logger; + + /** */ + static final String CMD_HOST = "--host"; + + /** */ + static final String CMD_PORT = "--port"; + + /** */ + static final String CMD_PASSWORD = "--password"; + + /** */ + static final String CMD_USER = "--user"; + + /** Option is used for auto confirmation. 
*/ + static final String CMD_AUTO_CONFIRMATION = "--yes"; + + /** */ + static final String CMD_PING_INTERVAL = "--ping-interval"; + + /** */ + static final String CMD_PING_TIMEOUT = "--ping-timeout"; + + // SSL configuration section + + /** */ + static final String CMD_SSL_PROTOCOL = "--ssl-protocol"; + + /** */ + static final String CMD_SSL_KEY_ALGORITHM = "--ssl-key-algorithm"; + + /** */ + static final String CMD_SSL_CIPHER_SUITES = "--ssl-cipher-suites"; + + /** */ + static final String CMD_KEYSTORE = "--keystore"; + + /** */ + static final String CMD_KEYSTORE_PASSWORD = "--keystore-password"; + + /** */ + static final String CMD_KEYSTORE_TYPE = "--keystore-type"; + + /** */ + static final String CMD_TRUSTSTORE = "--truststore"; + + /** */ + static final String CMD_TRUSTSTORE_PASSWORD = "--truststore-password"; + + /** */ + static final String CMD_TRUSTSTORE_TYPE = "--truststore-type"; + + /** List of optional auxiliary commands. */ + private static final Set AUX_COMMANDS = new HashSet<>(); + + /** Set of sensitive arguments */ + private static final Set SENSITIVE_ARGUMENTS = new HashSet<>(); + + static { + AUX_COMMANDS.add(CMD_HOST); + AUX_COMMANDS.add(CMD_PORT); + + AUX_COMMANDS.add(CMD_PASSWORD); + AUX_COMMANDS.add(CMD_USER); + + AUX_COMMANDS.add(CMD_AUTO_CONFIRMATION); + + AUX_COMMANDS.add(CMD_PING_INTERVAL); + AUX_COMMANDS.add(CMD_PING_TIMEOUT); + + AUX_COMMANDS.add(CMD_SSL_PROTOCOL); + AUX_COMMANDS.add(CMD_SSL_KEY_ALGORITHM); + AUX_COMMANDS.add(CMD_SSL_CIPHER_SUITES); + + AUX_COMMANDS.add(CMD_KEYSTORE); + AUX_COMMANDS.add(CMD_KEYSTORE_PASSWORD); + AUX_COMMANDS.add(CMD_KEYSTORE_TYPE); + + AUX_COMMANDS.add(CMD_TRUSTSTORE); + AUX_COMMANDS.add(CMD_TRUSTSTORE_PASSWORD); + AUX_COMMANDS.add(CMD_TRUSTSTORE_TYPE); + + SENSITIVE_ARGUMENTS.add(CMD_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_KEYSTORE_PASSWORD); + SENSITIVE_ARGUMENTS.add(CMD_TRUSTSTORE_PASSWORD); + } + + /** + * @param arg To check. 
+ * @return True if provided argument is among the sensitive ones and should not be displayed. + */ + public static boolean isSensitiveArgument(String arg) { + return SENSITIVE_ARGUMENTS.contains(arg); + } + + + /** + * @param logger Logger. + */ + public CommonArgParser(Logger logger) { + this.logger = logger; + } + + /** + * Creates list of common utility options. + * + * @return Array of common utility options. + */ + public static String[] getCommonOptions() { + List list = new ArrayList<>(32); + + list.add(optional(CMD_HOST, "HOST_OR_IP")); + list.add(optional(CMD_PORT, "PORT")); + list.add(optional(CMD_USER, "USER")); + list.add(optional(CMD_PASSWORD, "PASSWORD")); + list.add(optional(CMD_PING_INTERVAL, "PING_INTERVAL")); + list.add(optional(CMD_PING_TIMEOUT, "PING_TIMEOUT")); + list.add(optional(CMD_SSL_PROTOCOL, "SSL_PROTOCOL[, SSL_PROTOCOL_2, ..., SSL_PROTOCOL_N]")); + list.add(optional(CMD_SSL_CIPHER_SUITES, "SSL_CIPHER_1[, SSL_CIPHER_2, ..., SSL_CIPHER_N]")); + list.add(optional(CMD_SSL_KEY_ALGORITHM, "SSL_KEY_ALGORITHM")); + list.add(optional(CMD_KEYSTORE_TYPE, "KEYSTORE_TYPE")); + list.add(optional(CMD_KEYSTORE, "KEYSTORE_PATH")); + list.add(optional(CMD_KEYSTORE_PASSWORD, "KEYSTORE_PASSWORD")); + list.add(optional(CMD_TRUSTSTORE_TYPE, "TRUSTSTORE_TYPE")); + list.add(optional(CMD_TRUSTSTORE, "TRUSTSTORE_PATH")); + list.add(optional(CMD_TRUSTSTORE_PASSWORD, "TRUSTSTORE_PASSWORD")); + + return list.toArray(new String[0]); + } + + /** + * Parses and validates arguments. + * + * @param rawArgIter Iterator of arguments. + * @return Arguments bean. + * @throws IllegalArgumentException In case arguments aren't valid. 
+ */ + ConnectionAndSslParameters parseAndValidate(Iterator rawArgIter) { + String host = DFLT_HOST; + + String port = DFLT_PORT; + + String user = null; + + String pwd = null; + + Long pingInterval = DFLT_PING_INTERVAL; + + Long pingTimeout = DFLT_PING_TIMEOUT; + + boolean autoConfirmation = false; + + String sslProtocol = DFLT_SSL_PROTOCOL; + + String sslCipherSuites = ""; + + String sslKeyAlgorithm = SslContextFactory.DFLT_KEY_ALGORITHM; + + String sslKeyStoreType = SslContextFactory.DFLT_STORE_TYPE; + + String sslKeyStorePath = null; + + char sslKeyStorePassword[] = null; + + String sslTrustStoreType = SslContextFactory.DFLT_STORE_TYPE; + + String sslTrustStorePath = null; + + char sslTrustStorePassword[] = null; + + CommandArgIterator argIter = new CommandArgIterator(rawArgIter, AUX_COMMANDS); + + CommandList command = null; + + while (argIter.hasNextArg()) { + String str = argIter.nextArg("").toLowerCase(); + + CommandList cmd = CommandList.of(str); + + if (cmd != null) { + if (command != null) + throw new IllegalArgumentException("Only one action can be specified, but found at least two:" + + cmd.toString() + ", " + command.toString()); + + cmd.command().parseArguments(argIter); + + command = cmd; + } + else { + + switch (str) { + case CMD_HOST: + host = argIter.nextArg("Expected host name"); + + break; + + case CMD_PORT: + port = argIter.nextArg("Expected port number"); + + try { + int p = Integer.parseInt(port); + + if (p <= 0 || p > 65535) + throw new IllegalArgumentException("Invalid value for port: " + port); + } + catch (NumberFormatException ignored) { + throw new IllegalArgumentException("Invalid value for port: " + port); + } + + break; + + case CMD_PING_INTERVAL: + pingInterval = argIter.nextLongArg("ping interval"); + + break; + + case CMD_PING_TIMEOUT: + pingTimeout = argIter.nextLongArg("ping timeout"); + + break; + + case CMD_USER: + user = argIter.nextArg("Expected user name"); + + break; + + case CMD_PASSWORD: + pwd = 
argIter.nextArg("Expected password"); + + logger.info(securityWarningMessage(CMD_PASSWORD)); + + break; + + case CMD_SSL_PROTOCOL: + sslProtocol = argIter.nextArg("Expected SSL protocol"); + + break; + + case CMD_SSL_CIPHER_SUITES: + sslCipherSuites = argIter.nextArg("Expected SSL cipher suites"); + + break; + + case CMD_SSL_KEY_ALGORITHM: + sslKeyAlgorithm = argIter.nextArg("Expected SSL key algorithm"); + + break; + + case CMD_KEYSTORE: + sslKeyStorePath = argIter.nextArg("Expected SSL key store path"); + + break; + + case CMD_KEYSTORE_PASSWORD: + sslKeyStorePassword = argIter.nextArg("Expected SSL key store password").toCharArray(); + + logger.info(securityWarningMessage(CMD_KEYSTORE_PASSWORD)); + + break; + + case CMD_KEYSTORE_TYPE: + sslKeyStoreType = argIter.nextArg("Expected SSL key store type"); + + break; + + case CMD_TRUSTSTORE: + sslTrustStorePath = argIter.nextArg("Expected SSL trust store path"); + + break; + + case CMD_TRUSTSTORE_PASSWORD: + sslTrustStorePassword = argIter.nextArg("Expected SSL trust store password").toCharArray(); + + logger.info(securityWarningMessage(CMD_TRUSTSTORE_PASSWORD)); + + break; + + case CMD_TRUSTSTORE_TYPE: + sslTrustStoreType = argIter.nextArg("Expected SSL trust store type"); + + break; + + case CMD_AUTO_CONFIRMATION: + autoConfirmation = true; + + break; + + default: + throw new IllegalArgumentException("Unexpected argument: " + str); + } + } + } + + if (command == null) + throw new IllegalArgumentException("No action was specified"); + + return new ConnectionAndSslParameters(command.command(), host, port, user, pwd, + pingTimeout, pingInterval, autoConfirmation, + sslProtocol, sslCipherSuites, + sslKeyAlgorithm, sslKeyStorePath, sslKeyStorePassword, sslKeyStoreType, + sslTrustStorePath, sslTrustStorePassword, sslTrustStoreType); + } + + /** + * @param password Parsed password. + * @return String with warning to show for user. 
+ */ + private String securityWarningMessage(String password) { + final String pwdArgWarnFmt = "Warning: %s is insecure. " + + "Whenever possible, use interactive prompt for password (just discard %s option)."; + + return String.format(pwdArgWarnFmt, password, password); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java new file mode 100644 index 0000000000000..b73c0fd739087 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/ConnectionAndSslParameters.java @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.util.tostring.GridToStringExclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Container with common parsed and validated arguments. + */ +public class ConnectionAndSslParameters { + /** Host. */ + private String host; + + /** Port. */ + private String port; + + /** User. */ + private String user; + + /** Password. 
*/ + @GridToStringExclude + private String pwd; + + /** Force option is used for auto confirmation. */ + private boolean autoConfirmation; + + /** Ping timeout for grid client. See {@link GridClientConfiguration#getPingTimeout()}. */ + private long pingTimeout; + + /** Ping interval for grid client. See {@link GridClientConfiguration#getPingInterval()}. */ + private long pingInterval; + + /** SSL Protocol. */ + private String sslProtocol; + + /** SSL Cipher suites. */ + private String sslCipherSuites; + + /** SSL Key Algorithm. */ + private String sslKeyAlgorithm; + + /** Keystore. */ + private String sslKeyStorePath; + + /** Keystore Type. */ + private String sslKeyStoreType; + + /** Keystore Password. */ + @GridToStringExclude + private char[] sslKeyStorePassword; + + /** Truststore. */ + private String sslTrustStorePath; + + /** Truststore Type. */ + private String sslTrustStoreType; + + /** Truststore Password. */ + @GridToStringExclude + private char[] sslTrustStorePassword; + + /** High-level command. */ + private Command command; + + /** + * @param command Command. + * @param host Host. + * @param port Port. + * @param user User. + * @param pwd Password. + * @param pingTimeout Ping timeout. See {@link GridClientConfiguration#getPingTimeout()}. + * @param pingInterval Ping interval. See {@link GridClientConfiguration#getPingInterval()}. + * @param autoConfirmation Auto confirmation flag. + * @param sslProtocol SSL Protocol. + * @param sslCipherSuites SSL cipher suites. + * @param sslKeyAlgorithm SSL Key Algorithm. + * @param sslKeyStorePath Keystore. + * @param sslKeyStorePassword Keystore Password. + * @param sslKeyStoreType Keystore Type. + * @param sslTrustStorePath Truststore. + * @param sslTrustStorePassword Truststore Password. + * @param sslTrustStoreType Truststore Type. 
+ */ + public ConnectionAndSslParameters(Command command, String host, String port, String user, String pwd, + Long pingTimeout, Long pingInterval, boolean autoConfirmation, + String sslProtocol, String sslCipherSuites, String sslKeyAlgorithm, + String sslKeyStorePath, char[] sslKeyStorePassword, String sslKeyStoreType, + String sslTrustStorePath, char[] sslTrustStorePassword, String sslTrustStoreType + ) { + this.command = command; + this.host = host; + this.port = port; + this.user = user; + this.pwd = pwd; + + this.pingTimeout = pingTimeout; + this.pingInterval = pingInterval; + + this.autoConfirmation = autoConfirmation; + + this.sslProtocol = sslProtocol; + this.sslCipherSuites = sslCipherSuites; + + this.sslKeyAlgorithm = sslKeyAlgorithm; + this.sslKeyStorePath = sslKeyStorePath; + this.sslKeyStoreType = sslKeyStoreType; + this.sslKeyStorePassword = sslKeyStorePassword; + + this.sslTrustStorePath = sslTrustStorePath; + this.sslTrustStoreType = sslTrustStoreType; + this.sslTrustStorePassword = sslTrustStorePassword; + } + + /** + * @return High-level command which were defined by user to run. + */ + public Command command() { + return command; + } + + /** + * @return host name + */ + public String host() { + return host; + } + + /** + * @return port number + */ + public String port() { + return port; + } + + /** + * @return user name + */ + public String userName() { + return user; + } + + /** + * @param user New user name. + */ + public void userName(String user) { + this.user = user; + } + + /** + * @return password + */ + public String password() { + return pwd; + } + + /** + * @param pwd New password. + */ + public void password(String pwd) { + this.pwd = pwd; + } + + /** + * See {@link GridClientConfiguration#getPingInterval()}. + * + * @return Ping timeout. + */ + public long pingTimeout() { + return pingTimeout; + } + + /** + * See {@link GridClientConfiguration#getPingInterval()}. + * + * @return Ping interval. 
+ */ + public long pingInterval() { + return pingInterval; + } + + /** + * @return Auto confirmation option. + */ + public boolean autoConfirmation() { + return autoConfirmation; + } + + /** + * @return SSL protocol + */ + public String sslProtocol() { + return sslProtocol; + } + + /** + * @return SSL cipher suites. + */ + public String getSslCipherSuites() { + return sslCipherSuites; + } + + /** + * @return SSL Key Algorithm + */ + public String sslKeyAlgorithm() { + return sslKeyAlgorithm; + } + + /** + * @return Keystore + */ + public String sslKeyStorePath() { + return sslKeyStorePath; + } + + /** + * @return Keystore type + */ + public String sslKeyStoreType() { + return sslKeyStoreType; + } + + /** + * @return Keystore password + */ + public char[] sslKeyStorePassword() { + return sslKeyStorePassword; + } + + /** + * @return Truststore + */ + public String sslTrustStorePath() { + return sslTrustStorePath; + } + + /** + * @return Truststore type + */ + public String sslTrustStoreType() { + return sslTrustStoreType; + } + + /** + * @return Truststore password + */ + public char[] sslTrustStorePassword() { + return sslTrustStorePassword; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(ConnectionAndSslParameters.class, this, + "password", pwd == null ? null : "*****", + "sslKeyStorePassword", sslKeyStorePassword == null ? null: "*****", + "sslTrustStorePassword", sslTrustStorePassword == null? null: "*****" + ); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java new file mode 100644 index 0000000000000..0d90c35264982 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/DeactivateCommand.java @@ -0,0 +1,78 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. 
See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientClusterState; +import org.apache.ignite.internal.client.GridClientConfiguration; + +import static org.apache.ignite.internal.commandline.CommandList.DEACTIVATE; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; + +/** + * Command to deactivate cluster. + */ +public class DeactivateCommand implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + Command.usage(logger, "Deactivate cluster:", DEACTIVATE, optional(CMD_AUTO_CONFIRMATION)); + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + return "Warning: the command will deactivate a cluster."; + } + + /** + * Deactivate cluster. + * + * @param clientCfg Client configuration. + * @throws Exception If failed to deactivate. 
+ */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + GridClientClusterState state = client.state(); + + state.active(false); + + logger.info("Cluster deactivated"); + } + catch (Exception e) { + logger.severe("Failed to deactivate cluster."); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public String name() { + return DEACTIVATE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsole.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsole.java new file mode 100644 index 0000000000000..ca8c673c7fff9 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsole.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.io.Console; +import java.io.PrintWriter; +import java.io.Reader; + +/** + * Interface with {@link Console} methods and contract. 
+ */ +public interface GridConsole { + /** See {@link Console#writer()}. */ + PrintWriter writer(); + + /** See {@link Console#reader()}. */ + Reader reader(); + + /** See {@link Console#format(String, Object...)}. */ + Console format(String fmt, Object... args); + + /** See {@link Console#printf(String, Object...)}. */ + Console printf(String format, Object... args); + + /** See {@link Console#readLine(String, Object...)}. */ + String readLine(String fmt, Object... args); + + /** See {@link Console#readLine()}. */ + String readLine(); + + /** See {@link Console#readPassword(String, Object...)}. */ + char[] readPassword(String fmt, Object... args); + + /** See {@link Console#readPassword()}. */ + char[] readPassword(); + + /** See {@link Console#flush()}. */ + void flush(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsoleAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsoleAdapter.java new file mode 100644 index 0000000000000..b66e958b5a908 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/GridConsoleAdapter.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline; + +import java.io.Console; +import java.io.PrintWriter; +import java.io.Reader; +import org.jetbrains.annotations.Nullable; + +/** + * Default implementation of {@link GridConsole} like {@link Console} proxy. + */ +public class GridConsoleAdapter implements GridConsole { + /** Delegate. */ + private final Console delegate; + + /** */ + public static @Nullable GridConsoleAdapter getInstance() { + Console console = System.console(); + + return console == null ? null : new GridConsoleAdapter(console); + } + + /** Constructor. */ + private GridConsoleAdapter(Console delegate) { + if (delegate == null) + throw new NullPointerException("Console is not available."); + + this.delegate = delegate; + } + + /** {@inheritDoc} */ + @Override public PrintWriter writer() { + return delegate.writer(); + } + + /** {@inheritDoc} */ + @Override public Reader reader() { + return delegate.reader(); + } + + /** {@inheritDoc} */ + @Override public Console format(String fmt, Object... args) { + return delegate.format(fmt, args); + } + + /** {@inheritDoc} */ + @Override public Console printf(String format, Object... args) { + return delegate.printf(format, args); + } + + /** {@inheritDoc} */ + @Override public String readLine(String fmt, Object... args) { + return delegate.readLine(fmt, args); + } + + /** {@inheritDoc} */ + @Override public String readLine() { + return delegate.readLine(); + } + + /** {@inheritDoc} */ + @Override public char[] readPassword(String fmt, Object... 
args) { + return delegate.readPassword(fmt, args); + } + + /** {@inheritDoc} */ + @Override public char[] readPassword() { + return delegate.readPassword(); + } + + /** {@inheritDoc} */ + @Override public void flush() { + delegate.flush(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/NoopConsole.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/NoopConsole.java new file mode 100644 index 0000000000000..3bdafc7456f25 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/NoopConsole.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline; + +import java.io.Console; +import java.io.PrintWriter; +import java.io.Reader; + +/** + * No-op implementation of {@link GridConsole}. + */ +public class NoopConsole implements GridConsole { + /** {@inheritDoc} */ + @Override public PrintWriter writer() { + return null; + } + + /** {@inheritDoc} */ + @Override public Reader reader() { + return null; + } + + /** {@inheritDoc} */ + @Override public Console format(String fmt, Object... 
args) { + return null; + } + + /** {@inheritDoc} */ + @Override public Console printf(String format, Object... args) { + return null; + } + + /** {@inheritDoc} */ + @Override public String readLine(String fmt, Object... args) { + return null; + } + + /** {@inheritDoc} */ + @Override public String readLine() { + return null; + } + + /** {@inheritDoc} */ + @Override public char[] readPassword(String fmt, Object... args) { + return new char[0]; + } + + /** {@inheritDoc} */ + @Override public char[] readPassword() { + return new char[0]; + } + + /** {@inheritDoc} */ + @Override public void flush() { + /* No-op. */ + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/OutputFormat.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/OutputFormat.java new file mode 100644 index 0000000000000..356cb4b2e88e5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/OutputFormat.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.commandline; + +import org.jetbrains.annotations.NotNull; + +/** + * + */ +public enum OutputFormat { + /** Single line. 
*/ + SINGLE_LINE("single-line"), + + /** Multi line. */ + MULTI_LINE("multi-line"); + + /** */ + private final String text; + + /** */ + OutputFormat(String text) { + this.text = text; + } + + /** + * @return Text. + */ + public String text() { + return text; + } + + /** + * Converts format name in console to enumerated value. + * + * @param text Format name in console. + * @return Enumerated value. + * @throws IllegalArgumentException If enumerated value not found. + */ + public static OutputFormat fromConsoleName(@NotNull String text) { + for (OutputFormat format : values()) { + if (format.text.equals(text)) + return format; + } + + throw new IllegalArgumentException("Unknown output format " + text); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return text; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java new file mode 100644 index 0000000000000..d3c730638b47b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/StateCommand.java @@ -0,0 +1,69 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline; + +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientClusterState; +import org.apache.ignite.internal.client.GridClientConfiguration; + +import static org.apache.ignite.internal.commandline.CommandList.STATE; + +/** + * Command to print cluster state. + */ +public class StateCommand implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + Command.usage(logger, "Print current cluster state:", STATE); + } + + /** + * Print cluster state. + * + * @param clientCfg Client configuration. + * @throws Exception If failed to print state. + */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)){ + GridClientClusterState state = client.state(); + + logger.info("Cluster is " + (state.active() ? "active" : "inactive")); + } + catch (Throwable e) { + logger.severe("Failed to get cluster state."); + + throw e; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public Void arg() { + return null; + } + + /** {@inheritDoc} */ + @Override public String name() { + return STATE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/TaskExecutor.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TaskExecutor.java new file mode 100644 index 0000000000000..0bbbd7e60653d --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TaskExecutor.java @@ -0,0 +1,202 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. 
See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + + +package org.apache.ignite.internal.commandline; + +import java.io.IOException; +import java.net.InetAddress; +import java.util.Collection; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import org.apache.ignite.compute.ComputeTask; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientCompute; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientDisconnectedException; +import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.util.IgniteUtils; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.VisorTaskArgument; +import org.apache.ignite.lang.IgniteBiTuple; + +/** + * Visor task executor. + */ +public class TaskExecutor { + /** */ + public static final String DFLT_HOST = "127.0.0.1"; + + /** */ + public static final String DFLT_PORT = "11211"; + + /** Broadcast uuid. */ + public static final UUID BROADCAST_UUID = UUID.randomUUID(); + + /** + * @param client Client + * @param taskClsName Task class name. 
+ * @param taskArgs Task args. + * @param nodeId Node ID to execute task at (if null, random node will be chosen by balancer). + * @param clientCfg + * @return Task result. + * @throws GridClientException If failed to execute task. + */ + public static R executeTaskByNameOnNode( + GridClient client, + String taskClsName, + Object taskArgs, + UUID nodeId, + GridClientConfiguration clientCfg + ) throws GridClientException { + GridClientCompute compute = client.compute(); + + if (nodeId == BROADCAST_UUID) { + Collection nodes = compute.nodes(GridClientNode::connectable); + + if (F.isEmpty(nodes)) + throw new GridClientDisconnectedException("Connectable nodes not found", null); + + List nodeIds = nodes.stream() + .map(GridClientNode::nodeId) + .collect(Collectors.toList()); + + return client.compute().execute(taskClsName, new VisorTaskArgument<>(nodeIds, taskArgs, false)); + } + + GridClientNode node = null; + + if (nodeId == null) { + // Prefer node from connect string. + final String cfgAddr = clientCfg.getServers().iterator().next(); + + String[] parts = cfgAddr.split(":"); + + if (DFLT_HOST.equals(parts[0])) { + InetAddress addr; + + try { + addr = IgniteUtils.getLocalHost(); + } + catch (IOException e) { + throw new GridClientException("Can't get localhost name.", e); + } + + if (addr.isLoopbackAddress()) + throw new GridClientException("Can't find localhost name."); + + String origAddr = addr.getHostName() + ":" + parts[1]; + + node = listHosts(client).filter(tuple -> origAddr.equals(tuple.get2())).findFirst().map(IgniteBiTuple::get1).orElse(null); + + if (node == null) + node = listHostsByClientNode(client).filter(tuple -> tuple.get2().size() == 1 && cfgAddr.equals(tuple.get2().get(0))). + findFirst().map(IgniteBiTuple::get1).orElse(null); + } + else + node = listHosts(client).filter(tuple -> cfgAddr.equals(tuple.get2())).findFirst().map(IgniteBiTuple::get1).orElse(null); + + // Otherwise choose random node. 
+ if (node == null) + node = getBalancedNode(compute); + } + else { + for (GridClientNode n : compute.nodes()) { + if (n.connectable() && nodeId.equals(n.nodeId())) { + node = n; + + break; + } + } + + if (node == null) + throw new IllegalArgumentException("Node with id=" + nodeId + " not found"); + } + + return compute.projection(node).execute(taskClsName, new VisorTaskArgument<>(node.nodeId(), taskArgs, false)); + } + + /** + * @param client Client. + * @param taskCls Task class. + * @param taskArgs Task arguments. + * @param clientCfg Client configuration. + * @return Task result. + * @throws GridClientException If failed to execute task. + */ + public static R executeTask( + GridClient client, + Class> taskCls, + Object taskArgs, + GridClientConfiguration clientCfg + ) throws GridClientException { + return executeTaskByNameOnNode(client, taskCls.getName(), taskArgs, null, clientCfg); + } + + /** + * @param client Client. + * @return List of hosts. + */ + private static Stream> listHosts(GridClient client) throws GridClientException { + return client.compute() + .nodes(GridClientNode::connectable) + .stream() + .flatMap(node -> Stream.concat( + node.tcpAddresses() == null ? Stream.empty() : node.tcpAddresses().stream(), + node.tcpHostNames() == null ? Stream.empty() : node.tcpHostNames().stream() + ).map(addr -> new IgniteBiTuple<>(node, addr + ":" + node.tcpPort()))); + } + + /** + * @param client Client. + * @return List of hosts. + */ + private static Stream>> listHostsByClientNode( + GridClient client + ) throws GridClientException { + return client.compute().nodes(GridClientNode::connectable).stream() + .map( + node -> new IgniteBiTuple<>( + node, + Stream.concat( + node.tcpAddresses() == null ? Stream.empty() : node.tcpAddresses().stream(), + node.tcpHostNames() == null ? 
Stream.empty() : node.tcpHostNames().stream() + ) + .map(addr -> addr + ":" + node.tcpPort()).collect(Collectors.toList()) + ) + ); + } + + /** + * @param compute instance + * @return balanced node + */ + private static GridClientNode getBalancedNode(GridClientCompute compute) throws GridClientException { + Collection nodes = compute.nodes(GridClientNode::connectable); + + if (F.isEmpty(nodes)) + throw new GridClientDisconnectedException("Connectable node not found", null); + + return compute.balancer().balancedNode(nodes); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommandArg.java new file mode 100644 index 0000000000000..c2ea6df873834 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommandArg.java @@ -0,0 +1,82 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + + +package org.apache.ignite.internal.commandline; + +import org.apache.ignite.internal.commandline.argument.CommandArg; + +/** + * Transaction command arguments name. 
+ */ +public enum TxCommandArg implements CommandArg { + /** */ + TX_LIMIT("--limit"), + + /** */ + TX_ORDER("--order"), + + /** */ + TX_SERVERS("--servers"), + + /** */ + TX_CLIENTS("--clients"), + + /** */ + TX_DURATION("--min-duration"), + + /** */ + TX_SIZE("--min-size"), + + /** */ + TX_LABEL("--label"), + + /** */ + TX_NODES("--nodes"), + + /** */ + TX_XID("--xid"), + + /** */ + TX_KILL("--kill"), + + /** */ + TX_INFO("--info"); + + /** Option name. */ + private final String name; + + /** + * @param name Argument name. + */ + TxCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java new file mode 100644 index 0000000000000..c88e928b55e61 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/TxCommands.java @@ -0,0 +1,590 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.IgniteFeatures; +import org.apache.ignite.internal.IgniteNodeAttributes; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.visor.tx.FetchNearXidVersionTask; +import org.apache.ignite.internal.visor.tx.TxKeyLockType; +import org.apache.ignite.internal.visor.tx.TxMappingType; +import org.apache.ignite.internal.visor.tx.TxVerboseId; +import org.apache.ignite.internal.visor.tx.TxVerboseInfo; +import org.apache.ignite.internal.visor.tx.TxVerboseKey; +import org.apache.ignite.internal.visor.tx.VisorTxInfo; +import org.apache.ignite.internal.visor.tx.VisorTxOperation; +import org.apache.ignite.internal.visor.tx.VisorTxProjection; +import org.apache.ignite.internal.visor.tx.VisorTxSortOrder; +import org.apache.ignite.internal.visor.tx.VisorTxTask; +import org.apache.ignite.internal.visor.tx.VisorTxTaskArg; +import org.apache.ignite.internal.visor.tx.VisorTxTaskResult; +import org.apache.ignite.transactions.TransactionState; + +import static org.apache.ignite.internal.commandline.CommandList.TX; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static 
org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommandLogger.or; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTask; +import static org.apache.ignite.internal.commandline.TxCommandArg.TX_INFO; + +/** + * Transaction commands. + */ +public class TxCommands implements Command { + /** Arguments */ + private VisorTxTaskArg args; + + /** Logger. */ + private Logger logger; + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + Command.usage(logger, "List or kill transactions:", TX, getTxOptions()); + Command.usage(logger, "Print detailed information (topology and key lock ownership) about specific transaction:", + TX, TX_INFO.argName(), or("", "")); + + } + + /** + * @return Transaction command options. + */ + private String[] getTxOptions() { + List list = new ArrayList<>(); + + list.add(optional(TxCommandArg.TX_XID, "XID")); + list.add(optional(TxCommandArg.TX_DURATION, "SECONDS")); + list.add(optional(TxCommandArg.TX_SIZE, "SIZE")); + list.add(optional(TxCommandArg.TX_LABEL, "PATTERN_REGEX")); + list.add(optional(or(TxCommandArg.TX_SERVERS, TxCommandArg.TX_CLIENTS))); + list.add(optional(TxCommandArg.TX_NODES, "consistentId1[,consistentId2,....,consistentIdN]")); + list.add(optional(TxCommandArg.TX_LIMIT, "NUMBER")); + list.add(optional(TxCommandArg.TX_ORDER, or(VisorTxSortOrder.values()))); + list.add(optional(TxCommandArg.TX_KILL)); + list.add(optional(TX_INFO)); + list.add(optional(CMD_AUTO_CONFIRMATION)); + + return list.toArray(new String[list.size()]); + } + + /** {@inheritDoc} */ + @Override public VisorTxTaskArg arg() { + return args; + } + + /** + * Dump transactions information. + * + * @param clientCfg Client configuration. 
+ */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + this.logger = logger; + + try (GridClient client = Command.startClient(clientCfg)) { + if (args.getOperation() == VisorTxOperation.INFO) + return transactionInfo(client, clientCfg); + + Map res = executeTask(client, VisorTxTask.class, args, clientCfg); + + if (res.isEmpty()) + logger.info("Nothing found."); + else if (args.getOperation() == VisorTxOperation.KILL) + logger.info("Killed transactions:"); + else + logger.info("Matching transactions:"); + + for (Map.Entry entry : res.entrySet()) { + if (entry.getValue().getInfos().isEmpty()) + continue; + + ClusterNode key = entry.getKey(); + + logger.info(key.getClass().getSimpleName() + " [id=" + key.id() + + ", addrs=" + key.addresses() + + ", order=" + key.order() + + ", ver=" + key.version() + + ", isClient=" + key.isClient() + + ", consistentId=" + key.consistentId() + + "]"); + + for (VisorTxInfo info : entry.getValue().getInfos()) + logger.info(info.toUserString()); + } + + return res; + } + catch (Throwable e) { + logger.severe("Failed to perform operation."); + logger.severe(CommandLogger.errorMessage(e)); + + throw e; + } + } + + /** + * Dump transactions information. + * + * @param client Client. 
+ */ + private void transactions(GridClient client, GridClientConfiguration conf) throws GridClientException { + try { + if (args.getOperation() == VisorTxOperation.INFO) { + transactionInfo(client, conf); + + return; + } + + Map res = executeTask(client, VisorTxTask.class, args, conf); + + for (Map.Entry entry : res.entrySet()) { + if (entry.getValue().getInfos().isEmpty()) + continue; + + ClusterNode key = entry.getKey(); + + logger.info(nodeDescription(key)); + + for (VisorTxInfo info : entry.getValue().getInfos()) + logger.info(info.toUserString()); + } + } + catch (Throwable e) { + logger.severe("Failed to perform operation."); + + throw e; + } + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + if (args.getOperation() == VisorTxOperation.KILL) + return "Warning: the command will kill some transactions."; + + return null; + } + + /** + * @param argIter Argument iterator. + */ + @Override public void parseArguments(CommandArgIterator argIter) { + VisorTxProjection proj = null; + + Integer limit = null; + + VisorTxSortOrder sortOrder = null; + + Long duration = null; + + Integer size = null; + + String lbRegex = null; + + List consistentIds = null; + + VisorTxOperation op = VisorTxOperation.LIST; + + String xid = null; + + TxVerboseId txVerboseId = null; + + while (true) { + String str = argIter.peekNextArg(); + + if (str == null) + break; + + TxCommandArg arg = CommandArgUtils.of(str, TxCommandArg.class); + + if (arg == null) + break; + + switch (arg) { + case TX_LIMIT: + argIter.nextArg(""); + + limit = (int)argIter.nextLongArg(TxCommandArg.TX_LIMIT.toString()); + + break; + + case TX_ORDER: + argIter.nextArg(""); + + sortOrder = VisorTxSortOrder.valueOf(argIter.nextArg(TxCommandArg.TX_ORDER.toString()).toUpperCase()); + + break; + + case TX_SERVERS: + argIter.nextArg(""); + + proj = VisorTxProjection.SERVER; + break; + + case TX_CLIENTS: + argIter.nextArg(""); + + proj = VisorTxProjection.CLIENT; + break; + + case TX_NODES: + 
argIter.nextArg(""); + + Set ids = argIter.nextStringSet(TxCommandArg.TX_NODES.toString()); + + if (ids.isEmpty()) { + throw new IllegalArgumentException("Consistent id list is empty."); + } + + consistentIds = new ArrayList<>(ids); + break; + + case TX_DURATION: + argIter.nextArg(""); + + duration = argIter.nextLongArg(TxCommandArg.TX_DURATION.toString()) * 1000L; + break; + + case TX_SIZE: + argIter.nextArg(""); + + size = (int)argIter.nextLongArg(TxCommandArg.TX_SIZE.toString()); + break; + + case TX_LABEL: + argIter.nextArg(""); + + lbRegex = argIter.nextArg(TxCommandArg.TX_LABEL.toString()); + + try { + Pattern.compile(lbRegex); + } + catch (PatternSyntaxException ignored) { + throw new IllegalArgumentException("Illegal regex syntax"); + } + + break; + + case TX_XID: + argIter.nextArg(""); + + xid = argIter.nextArg(TxCommandArg.TX_XID.toString()); + break; + + case TX_KILL: + argIter.nextArg(""); + + op = VisorTxOperation.KILL; + break; + + case TX_INFO: + argIter.nextArg(""); + + op = VisorTxOperation.INFO; + + txVerboseId = TxVerboseId.fromString(argIter.nextArg(TX_INFO.argName())); + + break; + + default: + throw new AssertionError(); + } + } + + if (proj != null && consistentIds != null) + throw new IllegalArgumentException("Projection can't be used together with list of consistent ids."); + + this.args = new VisorTxTaskArg(op, limit, duration, size, null, proj, + consistentIds, xid, lbRegex, sortOrder, txVerboseId); + } + + /** + * Provides text descrition of a cluster node. + * + * @param node Node. + */ + private static String nodeDescription(ClusterNode node) { + return node.getClass().getSimpleName() + " [id=" + node.id() + + ", addrs=" + node.addresses() + + ", order=" + node.order() + + ", ver=" + node.version() + + ", isClient=" + node.isClient() + + ", consistentId=" + node.consistentId() + + "]"; + } + + /** + * Executes --tx --info command. + * + * @param client Client. 
+ */ + private Object transactionInfo(GridClient client, GridClientConfiguration conf) throws GridClientException { + checkFeatureSupportedByCluster(client, IgniteFeatures.TX_INFO_COMMAND, true); + + GridCacheVersion nearXidVer = executeTask(client, FetchNearXidVersionTask.class, args.txInfoArgument(), conf); + + boolean histMode = false; + + if (nearXidVer != null) { + logger.info("Resolved transaction near XID version: " + nearXidVer); + + args.txInfoArgument(new TxVerboseId(null, nearXidVer)); + } + else { + logger.info("Active transactions not found."); + + if (args.txInfoArgument().gridCacheVersion() != null) { + logger.info("Will try to peek history to find out whether transaction was committed / rolled back."); + + histMode = true; + } + else { + logger.info("You can specify transaction in GridCacheVersion format in order to peek history " + + "to find out whether transaction was committed / rolled back."); + + return null; + } + } + + Map res = executeTask(client, VisorTxTask.class, args, conf); + + if (histMode) + printTxInfoHistoricalResult(res); + else + printTxInfoResult(res); + + return res; + } + + /** + * Prints result of --tx --info command to output. + * + * @param res Response. 
+ */ + private void printTxInfoResult(Map res) { + String lb = null; + + Map usedCaches = new HashMap<>(); + Map usedCacheGroups = new HashMap<>(); + VisorTxInfo firstInfo = null; + TxVerboseInfo firstVerboseInfo = null; + Set states = new HashSet<>(); + + for (Map.Entry entry : res.entrySet()) { + for (VisorTxInfo info : entry.getValue().getInfos()) { + assert info.getTxVerboseInfo() != null; + + if (lb == null) + lb = info.getLabel(); + + if (firstInfo == null) { + firstInfo = info; + firstVerboseInfo = info.getTxVerboseInfo(); + } + + usedCaches.putAll(info.getTxVerboseInfo().usedCaches()); + usedCacheGroups.putAll(info.getTxVerboseInfo().usedCacheGroups()); + states.add(info.getState()); + } + } + + String indent = ""; + + logger.info(""); + logger.info(indent + "Transaction detailed info:"); + + printTransactionDetailedInfo( + res, usedCaches, usedCacheGroups, firstInfo, firstVerboseInfo, states, indent + DOUBLE_INDENT); + } + + /** + * Prints detailed info about transaction to output. + * + * @param res Response. + * @param usedCaches Used caches. + * @param usedCacheGroups Used cache groups. + * @param firstInfo First info. + * @param firstVerboseInfo First verbose info. + * @param states States. + * @param indent Indent. 
+ */ + private void printTransactionDetailedInfo(Map res, Map usedCaches, + Map usedCacheGroups, VisorTxInfo firstInfo, TxVerboseInfo firstVerboseInfo, + Set states, String indent) { + logger.info(indent + "Near XID version: " + firstVerboseInfo.nearXidVersion()); + logger.info(indent + "Near XID version (UUID): " + firstInfo.getNearXid()); + logger.info(indent + "Isolation: " + firstInfo.getIsolation()); + logger.info(indent + "Concurrency: " + firstInfo.getConcurrency()); + logger.info(indent + "Timeout: " + firstInfo.getTimeout()); + logger.info(indent + "Initiator node: " + firstVerboseInfo.nearNodeId()); + logger.info(indent + "Initiator node (consistent ID): " + firstVerboseInfo.nearNodeConsistentId()); + logger.info(indent + "Label: " + firstInfo.getLabel()); + logger.info(indent + "Topology version: " + firstInfo.getTopologyVersion()); + logger.info(indent + "Used caches (ID to name): " + usedCaches); + logger.info(indent + "Used cache groups (ID to name): " + usedCacheGroups); + logger.info(indent + "States across the cluster: " + states); + logger.info(indent + "Transaction topology: "); + + printTransactionTopology(res, indent + DOUBLE_INDENT); + } + + /** + * Prints transaction topology to output. + * + * @param res Response. + * @param indent Indent. + */ + private void printTransactionTopology(Map res, String indent) { + for (Map.Entry entry : res.entrySet()) { + logger.info(indent + nodeDescription(entry.getKey()) + ':'); + + printTransactionMappings(indent + DOUBLE_INDENT, entry); + } + } + + /** + * Prints transaction mappings for specific cluster node to output. + * + * @param indent Indent. + * @param entry Entry. 
+ */ + private void printTransactionMappings(String indent, Map.Entry entry) { + for (VisorTxInfo info : entry.getValue().getInfos()) { + TxVerboseInfo verboseInfo = info.getTxVerboseInfo(); + + if (verboseInfo != null) { + logger.info(indent + "Mapping [type=" + verboseInfo.txMappingType() + "]:"); + + printTransactionMapping(indent + DOUBLE_INDENT, info, verboseInfo); + } + else { + logger.info(indent + "Mapping [type=HISTORICAL]:"); + + logger.info(indent + DOUBLE_INDENT + "State: " + info.getState()); + } + } + } + + /** + * Prints specific transaction mapping to output. + * + * @param indent Indent. + * @param info Info. + * @param verboseInfo Verbose info. + */ + private void printTransactionMapping(String indent, VisorTxInfo info, TxVerboseInfo verboseInfo) { + logger.info(indent + "XID version (UUID): " + info.getXid()); + logger.info(indent + "State: " + info.getState()); + + if (verboseInfo.txMappingType() == TxMappingType.REMOTE) { + logger.info(indent + "Primary node: " + verboseInfo.dhtNodeId()); + logger.info(indent + "Primary node (consistent ID): " + verboseInfo.dhtNodeConsistentId()); + } + + if (!F.isEmpty(verboseInfo.localTxKeys())) { + logger.info(indent + "Mapped keys:"); + + printTransactionKeys(indent + DOUBLE_INDENT, verboseInfo); + } + } + + /** + * Prints keys of specific transaction mapping to output. + * + * @param indent Indent. + * @param verboseInfo Verbose info. + */ + private void printTransactionKeys(String indent, TxVerboseInfo verboseInfo) { + for (TxVerboseKey txVerboseKey : verboseInfo.localTxKeys()) { + logger.info(indent + (txVerboseKey.read() ? "Read" : "Write") + + " [lock=" + txVerboseKey.lockType() + "]: " + txVerboseKey.txKey()); + + if (txVerboseKey.lockType() == TxKeyLockType.AWAITS_LOCK) + logger.info(indent + DOUBLE_INDENT + "Lock owner XID: " + txVerboseKey.ownerVersion()); + } + } + + /** + * Prints results of --tx --info to output in case requested transaction is not active. + * + * @param res Response. 
+ */ + private void printTxInfoHistoricalResult(Map res) { + if (F.isEmpty(res)) + logger.info("Transaction was not found in history across the cluster."); + else { + logger.info("Transaction was found in completed versions history of the following nodes:"); + + for (Map.Entry entry : res.entrySet()) { + logger.info(DOUBLE_INDENT + nodeDescription(entry.getKey()) + ':'); + logger.info(DOUBLE_INDENT + DOUBLE_INDENT + "State: " + entry.getValue().getInfos().get(0).getState()); + } + } + } + + /** + * Checks that all cluster nodes support specified feature. + * + * @param client Client. + * @param feature Feature. + * @param validateClientNodes Whether client nodes should be checked as well. + */ + private static void checkFeatureSupportedByCluster( + GridClient client, + IgniteFeatures feature, + boolean validateClientNodes + ) throws GridClientException { + Collection nodes = validateClientNodes ? + client.compute().nodes() : + client.compute().nodes(GridClientNode::connectable); + + for (GridClientNode node : nodes) { + byte[] featuresAttrBytes = node.attribute(IgniteNodeAttributes.ATTR_IGNITE_FEATURES); + + if (!IgniteFeatures.nodeSupports(featuresAttrBytes, feature)) { + throw new IllegalStateException("Failed to execute command: cluster contains node that " + + "doesn't support feature [nodeId=" + node.nodeId() + ", feature=" + feature + ']'); + } + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return TX.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java new file mode 100644 index 0000000000000..2ac9c8794d896 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/WalCommands.java @@ -0,0 +1,288 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. 
See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.logging.Logger; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.misc.VisorClusterNode; +import org.apache.ignite.internal.visor.misc.VisorWalTask; +import org.apache.ignite.internal.visor.misc.VisorWalTaskArg; +import org.apache.ignite.internal.visor.misc.VisorWalTaskOperation; +import org.apache.ignite.internal.visor.misc.VisorWalTaskResult; + +import static org.apache.ignite.IgniteSystemProperties.IGNITE_ENABLE_EXPERIMENTAL_COMMAND; +import static org.apache.ignite.internal.commandline.CommandArgIterator.isCommandOrOption; +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; +import static org.apache.ignite.internal.commandline.CommandList.WAL; +import static 
org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.CMD_AUTO_CONFIRMATION; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTask; + +/** + * Wal commands. + */ +public class WalCommands implements Command> { + /** */ + static final String WAL_PRINT = "print"; + + /** */ + static final String WAL_DELETE = "delete"; + + /** */ + private Logger logger; + + /** + * Wal action. + */ + private String walAct; + + /** + * Wal arguments. + */ + private String walArgs; + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + if (!enableExperimental()) + return; + + Command.usage(logger, "Print absolute paths of unused archived wal segments on each node:", WAL, + WAL_PRINT, "[consistentId1,consistentId2,....,consistentIdN]"); + Command.usage(logger, "Delete unused archived wal segments on each node:", WAL, WAL_DELETE, + "[consistentId1,consistentId2,....,consistentIdN]", optional(CMD_AUTO_CONFIRMATION)); + } + + /** + * Execute WAL command. + * + * @param clientCfg Client configuration. + * @throws Exception If failed to execute wal action. 
+ */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + if (enableExperimental()) { + this.logger = logger; + + try (GridClient client = Command.startClient(clientCfg)) { + switch (walAct) { + case WAL_DELETE: + deleteUnusedWalSegments(client, walArgs, clientCfg); + + break; + + case WAL_PRINT: + default: + printUnusedWalSegments(client, walArgs, clientCfg); + + break; + } + } + } else { + logger.warning(String.format("For use experimental command add %s=true to JVM_OPTS in %s", + IGNITE_ENABLE_EXPERIMENTAL_COMMAND, UTILITY_NAME)); + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String confirmationPrompt() { + if (WAL_DELETE.equals(walAct)) + return "Warning: the command will delete unused WAL segments."; + + return null; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + String str = argIter.nextArg("Expected arguments for " + WAL.text()); + + String walAct = str.toLowerCase(); + + if (WAL_PRINT.equals(walAct) || WAL_DELETE.equals(walAct)) { + String walArgs = (str = argIter.peekNextArg()) != null && !isCommandOrOption(str) + ? argIter.nextArg("Unexpected argument for " + WAL.text() + ": " + walAct) + : ""; + + if (enableExperimental()) { + this.walAct = walAct; + this.walArgs = walArgs; + } + } + else + throw new IllegalArgumentException("Unexpected action " + walAct + " for " + WAL.text()); + } + + /** + * @return Tuple where first string is wal action, second - wal arguments. + */ + @Override public T2 arg() { + return new T2<>(walAct, walArgs); + } + + /** + * Execute delete unused WAL segments task. + * @param client Client. + * @param walArgs WAL args. + * @param clientCfg Client configuration. 
+ */ + private void deleteUnusedWalSegments( + GridClient client, + String walArgs, + GridClientConfiguration clientCfg + ) throws Exception { + VisorWalTaskResult res = executeTask(client, VisorWalTask.class, + walArg(VisorWalTaskOperation.DELETE_UNUSED_WAL_SEGMENTS, walArgs), clientCfg); + printDeleteWalSegments0(res); + } + + /** + * Execute print unused WAL segments task. + * @param client Client. + * @param walArgs Wal args. + * @param clientCfg Client configuration. + */ + private void printUnusedWalSegments( + GridClient client, + String walArgs, + GridClientConfiguration clientCfg + ) throws Exception { + VisorWalTaskResult res = executeTask(client, VisorWalTask.class, + walArg(VisorWalTaskOperation.PRINT_UNUSED_WAL_SEGMENTS, walArgs), clientCfg); + printUnusedWalSegments0(res); + } + + /** + * Prepare WAL task argument. + * + * @param op Operation. + * @param s Argument from command line. + * @return Task argument. + */ + private VisorWalTaskArg walArg(VisorWalTaskOperation op, String s) { + List consistentIds = null; + + if (!F.isEmpty(s)) { + consistentIds = new ArrayList<>(); + + for (String consistentId : s.split(",")) + consistentIds.add(consistentId.trim()); + } + + switch (op) { + case DELETE_UNUSED_WAL_SEGMENTS: + case PRINT_UNUSED_WAL_SEGMENTS: + return new VisorWalTaskArg(op, consistentIds); + + default: + return new VisorWalTaskArg(VisorWalTaskOperation.PRINT_UNUSED_WAL_SEGMENTS, consistentIds); + } + + } + + /** + * Print list of unused wal segments. + * + * @param taskRes Task result with baseline topology. 
+ */ + private void printUnusedWalSegments0(VisorWalTaskResult taskRes) { + logger.info("Unused wal segments per node:"); + logger.info(""); + + Map> res = taskRes.results(); + Map failRes = taskRes.exceptions(); + Map nodesInfo = taskRes.getNodesInfo(); + + for (Map.Entry> entry : res.entrySet()) { + VisorClusterNode node = nodesInfo.get(entry.getKey()); + + logger.info("Node=" + node.getConsistentId()); + logger.info(DOUBLE_INDENT + "addresses " + U.addressesAsString(node.getAddresses(), node.getHostNames())); + + for (String fileName : entry.getValue()) + logger.info(INDENT + fileName); + + logger.info(""); + } + + for (Map.Entry entry : failRes.entrySet()) { + VisorClusterNode node = nodesInfo.get(entry.getKey()); + + logger.info("Node=" + node.getConsistentId()); + logger.info(DOUBLE_INDENT + "addresses " + U.addressesAsString(node.getAddresses(), node.getHostNames())); + logger.info(INDENT + "failed with error: " + entry.getValue().getMessage()); + logger.info(""); + } + } + + /** + * Print list of unused wal segments. + * + * @param taskRes Task result with baseline topology. 
+ */ + private void printDeleteWalSegments0(VisorWalTaskResult taskRes) { + logger.info("WAL segments deleted for nodes:"); + logger.info(""); + + Map> res = taskRes.results(); + Map errors = taskRes.exceptions(); + Map nodesInfo = taskRes.getNodesInfo(); + + for (Map.Entry> entry : res.entrySet()) { + VisorClusterNode node = nodesInfo.get(entry.getKey()); + + logger.info("Node=" + node.getConsistentId()); + logger.info(DOUBLE_INDENT + "addresses " + U.addressesAsString(node.getAddresses(), node.getHostNames())); + logger.info(""); + } + + for (Map.Entry entry : errors.entrySet()) { + VisorClusterNode node = nodesInfo.get(entry.getKey()); + + logger.info("Node=" + node.getConsistentId()); + logger.info(DOUBLE_INDENT + "addresses " + U.addressesAsString(node.getAddresses(), node.getHostNames())); + logger.info(INDENT + "failed with error: " + entry.getValue().getMessage()); + logger.info(""); + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return WAL.toCommandName(); + } + + /** + * @return Value of {@link IgniteSystemProperties#IGNITE_ENABLE_EXPERIMENTAL_COMMAND} + */ + private boolean enableExperimental() { + return IgniteSystemProperties.getBoolean(IGNITE_ENABLE_EXPERIMENTAL_COMMAND, false); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArg.java new file mode 100644 index 0000000000000..0665ddabe61c4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArg.java @@ -0,0 +1,28 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.argument; + +/** + * Command argument interface. + */ +public interface CommandArg { + /** + * @return Argument name. + */ + String argName(); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArgUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArgUtils.java new file mode 100644 index 0000000000000..6cd16f17c0a14 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/argument/CommandArgUtils.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.argument; + +import org.jetbrains.annotations.Nullable; + +/** + * Utility class for control.sh arguments. + */ +public class CommandArgUtils { + /** + * Tries convert {@code text} to one of values {@code enumClass}. + * @param text Input test. + * @param enumClass {@link CommandArg} enum class. + * @param + * @return Converted argument or {@code null} if convert failed. + */ + public static & CommandArg> @Nullable E of(String text, Class enumClass) { + for (E e : enumClass.getEnumConstants()) { + if (e.argName().equalsIgnoreCase(text)) + return e; + } + + return null; + } + + /** Private constructor. */ + private CommandArgUtils() { + /* No-op. */ + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java new file mode 100644 index 0000000000000..22938856cc061 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineArguments.java @@ -0,0 +1,176 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + +package org.apache.ignite.internal.commandline.baseline; + +import java.util.List; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * This class contains all possible arguments after parsing baseline command input. + */ +public class BaselineArguments { + /** Command. */ + private BaselineSubcommands cmd; + /** + * {@code true} if auto-adjust should be enable, {@code false} if it should be disable, {@code null} if no operation + * needed. + */ + private Boolean enableAutoAdjust; + /** New value of soft timeout. */ + private Long softBaselineTimeout; + /** Requested topology version. */ + private long topVer = -1; + /** List of consistent ids for operation. */ + @GridToStringInclude + List consistentIds; + + /** + * @param cmd Command. + * @param enableAutoAdjust Auto-adjust enabled feature. + * @param softBaselineTimeout New value of soft timeout. + * @param topVer Requested topology version. + * @param consistentIds List of consistent ids for operation. + */ + public BaselineArguments(BaselineSubcommands cmd, Boolean enableAutoAdjust, Long softBaselineTimeout, + long topVer, List consistentIds) { + this.cmd = cmd; + this.enableAutoAdjust = enableAutoAdjust; + this.softBaselineTimeout = softBaselineTimeout; + this.topVer = topVer; + this.consistentIds = consistentIds; + } + + /** + * @return Command. + */ + public BaselineSubcommands getCmd() { + return cmd; + } + + /** + * @return {@code true} if auto-adjust should be enable, {@code false} if it should be disable, {@code null} if no + * operation needed. + */ + public Boolean getEnableAutoAdjust() { + return enableAutoAdjust; + } + + /** + * @return New value of soft timeout. + */ + public Long getSoftBaselineTimeout() { + return softBaselineTimeout; + } + + /** + * @return Requested topology version. 
+ */ + public long getTopVer() { + return topVer; + } + + /** + * @return List of consistent ids for operation. + */ + public List getConsistentIds() { + return consistentIds; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(BaselineArguments.class, this); + } + + /** + * Builder of {@link BaselineArguments}. + */ + public static class Builder { + /** Command. */ + private BaselineSubcommands cmd; + /** + * {@code true} if auto-adjust should be enable, {@code false} if it should be disable, {@code null} if no + * operation needed. + */ + private Boolean enable; + /** New value of soft timeout. */ + private Long timeout; + /** Requested topology version. */ + private long ver = -1; + /** List of consistent ids for operation. */ + private List ids; + + /** + * @param cmd Command. + */ + public Builder(BaselineSubcommands cmd) { + this.cmd = cmd; + } + + /** + * @param enable {@code true} if auto-adjust should be enable, {@code false} if it should be disable, {@code + * null} if no operation needed. + * @return This instance for chaining. + */ + public Builder withEnable(Boolean enable) { + this.enable = enable; + + return this; + } + + /** + * @param timeout New value of soft timeout. + * @return This instance for chaining. + */ + public Builder withSoftBaselineTimeout(Long timeout) { + this.timeout = timeout; + + return this; + } + + /** + * @param ver Requested topology version. + * @return This instance for chaining. + */ + public Builder withTopVer(long ver) { + this.ver = ver; + + return this; + } + + /** + * @param ids List of consistent ids for operation. + * @return This instance for chaining. + */ + public Builder withConsistentIds(List ids) { + this.ids = ids; + + return this; + } + + /** + * @return {@link BaselineArguments}. 
+ */ + public BaselineArguments build() { + return new BaselineArguments(cmd, enable, timeout, ver, ids); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineSubcommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineSubcommands.java new file mode 100644 index 0000000000000..e0f656e1198a1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/baseline/BaselineSubcommands.java @@ -0,0 +1,116 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline.baseline; + +import org.apache.ignite.internal.visor.baseline.VisorBaselineOperation; +import org.jetbrains.annotations.Nullable; + +/** + * Set of baseline commands. + */ +public enum BaselineSubcommands { + /** + * Add nodes to baseline. + */ + ADD("add", VisorBaselineOperation.ADD), + + /** + * Remove nodes from baseline. + */ + REMOVE("remove", VisorBaselineOperation.REMOVE), + + /** + * Collect information about baseline. + */ + COLLECT("collect", VisorBaselineOperation.COLLECT), + + /** + * Set new baseline. 
+ */ + SET("set", VisorBaselineOperation.SET), + + /** + * Check current topology version. + */ + VERSION("version", VisorBaselineOperation.VERSION), + + ; + + /** Enumerated values. */ + private static final BaselineSubcommands[] VALS = values(); + + /** Name. */ + private final String name; + + /** Corresponding visor baseline operation. */ + private final VisorBaselineOperation visorBaselineOperation; + + /** + * @param name Name. + * @param operation + */ + BaselineSubcommands(String name, VisorBaselineOperation operation) { + this.name = name; + visorBaselineOperation = operation; + } + + /** + * @param text Command text. + * @return Command for the text. + */ + public static BaselineSubcommands of(String text) { + for (BaselineSubcommands cmd : BaselineSubcommands.values()) { + if (cmd.text().equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** + * @return Name. + */ + public String text() { + return name; + } + + /** + * @return {@link VisorBaselineOperation} which is associated with baseline subcommand. + */ + public VisorBaselineOperation visorBaselineOperation() { + return visorBaselineOperation; + } + + /** + * Efficiently gets enumerated value from its ordinal. + * + * @param ord Ordinal value. + * @return Enumerated value or {@code null} if ordinal out of range. + */ + @Nullable public static BaselineSubcommands fromOrdinal(int ord) { + return ord >= 0 && ord < VALS.length ? 
VALS[ord] : null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommandList.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommandList.java new file mode 100644 index 0000000000000..86bd2695fb221 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommandList.java @@ -0,0 +1,127 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.ignite.internal.commandline.cache; + +import org.apache.ignite.internal.commandline.Command; +import org.jetbrains.annotations.Nullable; + +/** + * + */ +public enum CacheCommandList { + /** + * Prints out help for the cache command. + */ + HELP("help", null), + + /** + * Checks consistency of primary and backup partitions assuming no concurrent updates are happening in the cluster. + */ + IDLE_VERIFY("idle_verify", new IdleVerify()), + + /** + * Prints info regarding caches, groups or sequences. + */ + LIST("list", new CacheViewer()), + + /** + * Validates indexes attempting to read each indexed entry. 
+ */ + VALIDATE_INDEXES("validate_indexes", new CacheValidateIndexes()), + + /** + * Prints info about contended keys (the keys concurrently locked from multiple transactions). + */ + CONTENTION("contention", new CacheContention()), + + /** + * Collect information on the distribution of partitions. + */ + DISTRIBUTION("distribution", new CacheDistribution()), + + /** + * Reset lost partitions + */ + RESET_LOST_PARTITIONS("reset_lost_partitions", new ResetLostPartitions()), + + /** + * Find and remove garbage. + */ + FIND_AND_DELETE_GARBAGE("find_garbage", new FindAndDeleteGarbage()); + + + /** Enumerated values. */ + private static final CacheCommandList[] VALS = values(); + + /** Name. */ + private final String name; + + /** */ + private final Command command; + + /** + * @param name Name. + * @param command Command implementation. + */ + CacheCommandList(String name, Command command) { + this.name = name; + this.command = command; + } + + /** + * @param text Command text. + * @return Command for the text. + */ + public static CacheCommandList of(String text) { + for (CacheCommandList cmd : CacheCommandList.values()) { + if (cmd.text().equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** + * @return Name. + */ + public String text() { + return name; + } + + /** + * @return Cache subcommand implementation. + */ + public Command subcommand() { + return command; + } + + /** + * Efficiently gets enumerated value from its ordinal. + * + * @param ord Ordinal value. + * @return Enumerated value or {@code null} if ordinal out of range. + */ + @Nullable public static CacheCommandList fromOrdinal(int ord) { + return ord >= 0 && ord < VALS.length ? 
VALS[ord] : null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java new file mode 100644 index 0000000000000..0c00fbbbac948 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheCommands.java @@ -0,0 +1,221 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Arrays; +import java.util.Comparator; +import java.util.Map; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.SB; + +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; +import static org.apache.ignite.internal.commandline.CommandList.CACHE; +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommonArgParser.getCommonOptions; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.CONTENTION; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.FIND_AND_DELETE_GARBAGE; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.HELP; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.LIST; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.VALIDATE_INDEXES; + +/** + * High-level "cache" command implementation. + */ +public class CacheCommands implements Command { + /** */ + protected static final String NODE_ID = "nodeId"; + + /** */ + protected static final String OP_NODE_ID = optional(NODE_ID); + + /** */ + private CacheSubcommands subcommand; + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + logger.info(INDENT + "View caches information in a cluster. 
For more details type:"); + logger.info(DOUBLE_INDENT + CommandLogger.join(" ", UTILITY_NAME, CACHE, HELP)); + logger.info(""); + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + if (subcommand == CacheSubcommands.HELP) { + printCacheHelp(logger); + + return null; + } + + Command command = subcommand.subcommand(); + + if (command == null) + throw new IllegalStateException("Unknown command " + subcommand); + + return command.execute(clientCfg, logger); + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + if (!argIter.hasNextSubArg()) { + throw new IllegalArgumentException("Arguments are expected for --cache subcommand, " + + "run '--cache help' for more info."); + } + + String str = argIter.nextArg("").toLowerCase(); + + CacheSubcommands cmd = CacheSubcommands.of(str); + + if (cmd == null) + cmd = CacheSubcommands.HELP; + + switch (cmd) { + case HELP: + break; + + case RESET_LOST_PARTITIONS: + case LIST: + case IDLE_VERIFY: + case VALIDATE_INDEXES: + case FIND_AND_DELETE_GARBAGE: + case CONTENTION: + case DISTRIBUTION: + cmd.subcommand().parseArguments(argIter); + + break; + + default: + throw new IllegalArgumentException("Unknown --cache subcommand " + cmd); + } + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected argument of --cache subcommand: " + argIter.peekNextArg()); + + this.subcommand = cmd; + } + + + /** */ + private void printCacheHelp(Logger logger) { + logger.info(INDENT + "The '" + CACHE + " subcommand' is used to get information about and perform actions" + + " with caches. 
The command has the following syntax:"); + logger.info(""); + logger.info(INDENT + CommandLogger.join(" ", UTILITY_NAME, CommandLogger.join(" ", getCommonOptions())) + " " + + CACHE + " [subcommand] "); + logger.info(""); + logger.info(INDENT + "The subcommands that take " + OP_NODE_ID + " as an argument ('" + LIST + "', '" + + FIND_AND_DELETE_GARBAGE+ "', '" + CONTENTION + "' and '" + VALIDATE_INDEXES + + "') will be executed on the given node or on all server nodes" + + " if the option is not specified. Other commands will run on a random server node."); + logger.info(""); + logger.info(""); + logger.info(INDENT + "Subcommands:"); + + Arrays.stream(CacheCommandList.values()).forEach(c -> { + if (c.subcommand() != null) c.subcommand().printUsage(logger); + }); + + logger.info(""); + } + + + /** + * Print cache command usage with default indention. + * + * @param logger Logger to use. + * @param cmd Cache command. + * @param description Command description. + * @param paramsDesc Parameter desciptors. + * @param args Cache command arguments. + */ + protected static void usageCache( + Logger logger, + CacheSubcommands cmd, + String description, + Map paramsDesc, + String... args + ) { + logger.info(""); + logger.info(INDENT + CommandLogger.join(" ", CACHE, cmd, CommandLogger.join(" ", args))); + logger.info(DOUBLE_INDENT + description); + + if (!F.isEmpty(paramsDesc)) { + logger.info(""); + logger.info(DOUBLE_INDENT + "Parameters:"); + + usageCacheParams(paramsDesc, DOUBLE_INDENT + INDENT, logger); + } + } + + /** + * Print cache command arguments usage. + * + * @param paramsDesc Cache command arguments description. + * @param indent Indent string. + * @param logger Logger to use. 
+ */ + private static void usageCacheParams(Map paramsDesc, String indent, Logger logger) { + int maxParamLen = paramsDesc.keySet().stream().max(Comparator.comparingInt(String::length)).get().length(); + + for (Map.Entry param : paramsDesc.entrySet()) + logger.info(indent + extendToLen(param.getKey(), maxParamLen) + " " + "- " + param.getValue()); + } + + /** + * Appends spaces to end of input string for extending to needed length. + * + * @param s Input string. + * @param targetLen Needed length. + * @return String with appended spaces on the end. + */ + private static String extendToLen(String s, int targetLen) { + assert targetLen >= 0; + assert s.length() <= targetLen; + + if (s.length() == targetLen) + return s; + + SB sb = new SB(targetLen); + + sb.a(s); + + for (int i = 0; i < targetLen - s.length(); i++) + sb.a(" "); + + return sb.toString(); + } + + /** {@inheritDoc} */ + @Override public CacheSubcommands arg() { + return subcommand; + } + + /** {@inheritDoc} */ + @Override public String name() { + return CACHE.toCommandName(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java new file mode 100644 index 0000000000000..10c615d9cfa6f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheContention.java @@ -0,0 +1,155 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. 
You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + + +package org.apache.ignite.internal.commandline.cache; + +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.processors.cache.verify.ContentionInfo; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.verify.VisorContentionTask; +import org.apache.ignite.internal.visor.verify.VisorContentionTaskArg; +import org.apache.ignite.internal.visor.verify.VisorContentionTaskResult; + +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.OP_NODE_ID; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.CONTENTION; + +/** + * Cache contention detection subcommand. 
+ */ +public class CacheContention implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String description = "Show the keys that are point of contention for multiple transactions."; + + usageCache(logger, CONTENTION, description, null, "minQueueSize", + OP_NODE_ID, optional("maxPrint")); + } + + /** + * Container for command arguments. + */ + public class Arguments { + /** Node id. */ + private UUID nodeId; + + /** Min queue size. */ + private int minQueueSize; + + /** Max print. */ + private int maxPrint; + + /** + * + */ + public Arguments(UUID nodeId, int minQueueSize, int maxPrint) { + this.nodeId = nodeId; + this.minQueueSize = minQueueSize; + this.maxPrint = maxPrint; + } + + /** + * @return Node id. + */ + public UUID nodeId() { + return nodeId; + } + + /** + * @return Min queue size. + */ + public int minQueueSize() { + return minQueueSize; + } + /** + * @return Max print. + */ + public int maxPrint() { + return maxPrint; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** + * Command parsed arguments. + */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + VisorContentionTaskArg taskArg = new VisorContentionTaskArg(args.minQueueSize(), args.maxPrint()); + + UUID nodeId = args.nodeId() == null ? 
BROADCAST_UUID : args.nodeId(); + + VisorContentionTaskResult res; + + try (GridClient client = Command.startClient(clientCfg);) { + res = executeTaskByNameOnNode(client, VisorContentionTask.class.getName(), taskArg, nodeId, clientCfg); + } + + CommandLogger.printErrors(res.exceptions(), "Contention check failed on nodes:", logger); + + for (ContentionInfo info : res.getInfos()) + info.print(logger); + + return res; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + int minQueueSize = Integer.parseInt(argIter.nextArg("Min queue size expected")); + + UUID nodeId = null; + + if (argIter.hasNextSubArg()) + nodeId = UUID.fromString(argIter.nextArg("")); + + int maxPrint = 10; + + if (argIter.hasNextSubArg()) + maxPrint = Integer.parseInt(argIter.nextArg("")); + + args = new Arguments(nodeId, minQueueSize, maxPrint); + } + + /** {@inheritDoc} */ + @Override public String name() { + return CONTENTION.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java new file mode 100644 index 0000000000000..64552d1b3f584 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheDistribution.java @@ -0,0 +1,179 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. 
You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + + +package org.apache.ignite.internal.commandline.cache; + +import java.util.HashSet; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandHandler; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.cache.argument.DistributionCommandArg; +import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTask; +import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskArg; +import org.apache.ignite.internal.commandline.cache.distribution.CacheDistributionTaskResult; +import org.apache.ignite.internal.util.typedef.internal.S; + +import static org.apache.ignite.internal.commandline.CommandHandler.NULL; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommandLogger.or; +import static org.apache.ignite.internal.commandline.TaskExecutor.BROADCAST_UUID; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.NODE_ID; +import static 
org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.DISTRIBUTION; +import static org.apache.ignite.internal.commandline.cache.argument.DistributionCommandArg.USER_ATTRIBUTES; + +/** + * Would collect and print info about how data is spread between nodes and partitions. + */ +public class CacheDistribution implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String CACHES = "cacheName1,...,cacheNameN"; + String description = "Prints the information about partition distribution."; + + usageCache(logger, DISTRIBUTION, description, null, + or(NODE_ID, CommandHandler.NULL), optional(CACHES), optional(USER_ATTRIBUTES, "attrName1,...,attrNameN")); + } + + /** + * Container for command arguments. + */ + public class Arguments { + /** Caches. */ + private Set caches; + + /** Node id. */ + private UUID nodeId; + + /** Additional user attributes in result. Set of attribute names whose values will be searched in ClusterNode.attributes(). */ + private Set userAttributes; + + /** + * + */ + public Arguments(Set caches, UUID nodeId, Set userAttributes) { + this.caches = caches; + this.nodeId = nodeId; + this.userAttributes = userAttributes; + } + + /** + * @return Caches. + */ + public Set caches() { + return caches; + } + + /** + * @return Node id. + */ + public UUID nodeId() { + return nodeId; + } + + /** + * @return Additional user attributes in result. Set of attribute names whose values will be searched in ClusterNode.attributes(). 
+ */ + public Set getUserAttributes() { + return userAttributes; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** Command parsed arguments */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + CacheDistributionTaskArg taskArg = new CacheDistributionTaskArg(args.caches(), args.getUserAttributes()); + + UUID nodeId = args.nodeId() == null ? BROADCAST_UUID : args.nodeId(); + + CacheDistributionTaskResult res; + + try (GridClient client = Command.startClient(clientCfg)) { + res = executeTaskByNameOnNode(client, CacheDistributionTask.class.getName(), taskArg, nodeId, clientCfg); + } + + CommandLogger.printErrors(res.exceptions(), "Cache distrubution task failed on nodes:", logger); + + res.print(System.out::println); + + return res; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + UUID nodeId = null; + Set caches = null; + Set userAttributes = null; + + String nodeIdStr = argIter.nextArg("Node id expected or null"); + + if (!NULL.equals(nodeIdStr)) + nodeId = UUID.fromString(nodeIdStr); + + while (argIter.hasNextSubArg()) { + String nextArg = argIter.nextArg(""); + + DistributionCommandArg arg = CommandArgUtils.of(nextArg, DistributionCommandArg.class); + + if (arg == USER_ATTRIBUTES) { + nextArg = argIter.nextArg("User attributes are expected to be separated by commas"); + + userAttributes = new HashSet<>(); + + for (String userAttribute : nextArg.split(",")) + userAttributes.add(userAttribute.trim()); + + nextArg = (argIter.hasNextSubArg()) ? 
argIter.nextArg("") : null; + + } + + if (nextArg != null) + caches = argIter.parseStringSet(nextArg); + } + + args = new Arguments(caches, nodeId, userAttributes); + } + + /** {@inheritDoc} */ + @Override public String name() { + return DISTRIBUTION.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheSubcommands.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheSubcommands.java new file mode 100644 index 0000000000000..298a8820652c4 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheSubcommands.java @@ -0,0 +1,151 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + +package org.apache.ignite.internal.commandline.cache; + +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.cache.argument.DistributionCommandArg; +import org.apache.ignite.internal.commandline.cache.argument.FindAndDeleteGarbageArg; +import org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg; +import org.apache.ignite.internal.commandline.cache.argument.ListCommandArg; +import org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg; +import org.jetbrains.annotations.Nullable; + +/** + * + */ +public enum CacheSubcommands { + /** + * Prints out help for the cache command. + */ + HELP("help", null, null), + + /** + * Checks consistency of primary and backup partitions assuming no concurrent updates are happening in the cluster. + */ + IDLE_VERIFY("idle_verify", IdleVerifyCommandArg.class, new IdleVerify()), + + /** + * Prints info regarding caches, groups or sequences. + */ + LIST("list", ListCommandArg.class, new CacheViewer()), + + /** + * Validates indexes attempting to read each indexed entry. + */ + VALIDATE_INDEXES("validate_indexes", ValidateIndexesCommandArg.class, new CacheValidateIndexes()), + + /** + * Prints info about contended keys (the keys concurrently locked from multiple transactions). + */ + CONTENTION("contention", null, new CacheContention()), + + /** + * Collect information on the distribution of partitions. + */ + DISTRIBUTION("distribution", DistributionCommandArg.class, new CacheDistribution()), + + /** + * Reset lost partitions + */ + RESET_LOST_PARTITIONS("reset_lost_partitions", null, new ResetLostPartitions()), + + /** + * Find and remove garbage. + */ + FIND_AND_DELETE_GARBAGE("find_garbage", FindAndDeleteGarbageArg.class, new FindAndDeleteGarbage()); + + + /** Enumerated values. 
*/ + private static final CacheSubcommands[] VALS = values(); + + /** Enum class with argument list for command. */ + private final Class> commandArgs; + + /** Name. */ + private final String name; + + /** Command instance for certain type. */ + private final Command command; + + /** + * @param name Name. + * @param command Command realization. + */ + CacheSubcommands( + String name, + Class> commandArgs, + Command command + ) { + this.name = name; + this.commandArgs = commandArgs; + this.command = command; + } + + /** + * @param text Command text. + * @return Command for the text. + */ + public static CacheSubcommands of(String text) { + for (CacheSubcommands cmd : CacheSubcommands.values()) { + if (cmd.text().equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** + * @return Name. + */ + public String text() { + return name; + } + + /** + * @return Subcommand realization. + */ + public Command subcommand() { + return command; + } + + /** + * @return Enum class with argument list for command. + */ + public Class> getCommandArgs() { + return commandArgs; + } + + /** + * Efficiently gets enumerated value from its ordinal. + * + * @param ord Ordinal value. + * @return Enumerated value or {@code null} if ordinal out of range. + */ + @Nullable public static CacheSubcommands fromOrdinal(int ord) { + return ord >= 0 && ord < VALS.length ? 
VALS[ord] : null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java new file mode 100644 index 0000000000000..eb8595da25282 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheValidateIndexes.java @@ -0,0 +1,291 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg; +import org.apache.ignite.internal.processors.cache.verify.PartitionKey; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.verify.IndexIntegrityCheckIssue; +import org.apache.ignite.internal.visor.verify.IndexValidationIssue; +import org.apache.ignite.internal.visor.verify.ValidateIndexesPartitionResult; +import org.apache.ignite.internal.visor.verify.VisorValidateIndexesJobResult; +import org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskArg; +import org.apache.ignite.internal.visor.verify.VisorValidateIndexesTaskResult; + +import static org.apache.ignite.internal.commandline.CommandLogger.DOUBLE_INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommandLogger.or; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.cache.CacheCommandList.IDLE_VERIFY; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.OP_NODE_ID; +import 
static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.VALIDATE_INDEXES; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.CACHE_FILTER; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.EXCLUDE_CACHES; +import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_FIRST; +import static org.apache.ignite.internal.commandline.cache.argument.ValidateIndexesCommandArg.CHECK_THROUGH; +import static org.apache.ignite.internal.processors.cache.GridCacheUtils.UTILITY_CACHE_NAME; + +/** + * Validate indexes command. + */ +public class CacheValidateIndexes implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String CACHES = "cacheName1,...,cacheNameN"; + String description = "Verify counters and hash sums of primary and backup partitions for the specified " + + "caches/cache groups on an idle cluster and print out the differences, if any. " + + "Cache filtering options configure the set of caches that will be processed by " + IDLE_VERIFY + " command. " + + "Default value for the set of cache names (or cache group names) is all cache groups. Default value for " + + EXCLUDE_CACHES + " is empty set. Default value for " + CACHE_FILTER + " is no filtering. Therefore, " + + "the set of all caches is sequently filtered by cache name " + + "regexps, by cache type and after all by exclude regexps."; + + Map map = U.newLinkedHashMap(16); + + map.put(CHECK_FIRST + " N", "validate only the first N keys"); + map.put(CHECK_THROUGH + " K", "validate every Kth key"); + + usageCache(logger, VALIDATE_INDEXES, description, map, + optional(CACHES), OP_NODE_ID, optional(or(CHECK_FIRST + " N", CHECK_THROUGH + " K"))); + } + + /** + * Container for command arguments. + */ + public class Arguments { + /** Caches. 
*/ + private Set caches; + + /** Node id. */ + private UUID nodeId; + + /** Max number of entries to be checked. */ + private int checkFirst = -1; + + /** Number of entries to check through. */ + private int checkThrough = -1; + + /** + * + */ + public Arguments(Set caches, UUID nodeId, int checkFirst, int checkThrough) { + this.caches = caches; + this.nodeId = nodeId; + this.checkFirst = checkFirst; + this.checkThrough = checkThrough; + } + + /** + * @return Caches. + */ + public Set caches() { + return caches; + } + + /** + * @return Max number of entries to be checked. + */ + public int checkFirst() { + return checkFirst; + } + + /** + * @return Number of entries to check through. + */ + public int checkThrough() { + return checkThrough; + } + + + /** + * @return Node id. + */ + public UUID nodeId() { + return nodeId; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** Command parsed arguments. */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + VisorValidateIndexesTaskArg taskArg = new VisorValidateIndexesTaskArg( + args.caches(), + args.nodeId() != null ? 
Collections.singleton(args.nodeId()) : null, + args.checkFirst(), + args.checkThrough() + ); + + try (GridClient client = Command.startClient(clientCfg)) { + VisorValidateIndexesTaskResult taskRes = executeTaskByNameOnNode( + client, "org.apache.ignite.internal.visor.verify.VisorValidateIndexesTask", taskArg, null, clientCfg); + + boolean errors = CommandLogger.printErrors(taskRes.exceptions(), "Index validation failed on nodes:", logger); + + for (Map.Entry nodeEntry : taskRes.results().entrySet()) { + if (!nodeEntry.getValue().hasIssues()) + continue; + + errors = true; + + logger.info("Index issues found on node " + nodeEntry.getKey() + ":"); + + Collection integrityCheckFailures = nodeEntry.getValue().integrityCheckFailures(); + + if (!integrityCheckFailures.isEmpty()) { + for (IndexIntegrityCheckIssue is : integrityCheckFailures) + logger.info(INDENT + is); + } + + Map partRes = nodeEntry.getValue().partitionResult(); + + for (Map.Entry e : partRes.entrySet()) { + ValidateIndexesPartitionResult res = e.getValue(); + + if (!res.issues().isEmpty()) { + logger.info(INDENT + CommandLogger.join(" ", e.getKey(), e.getValue())); + + for (IndexValidationIssue is : res.issues()) + logger.info(DOUBLE_INDENT + is); + } + } + + Map idxRes = nodeEntry.getValue().indexResult(); + + for (Map.Entry e : idxRes.entrySet()) { + ValidateIndexesPartitionResult res = e.getValue(); + + if (!res.issues().isEmpty()) { + logger.info(INDENT + CommandLogger.join(" ", "SQL Index", e.getKey(), e.getValue())); + + for (IndexValidationIssue is : res.issues()) + logger.info(DOUBLE_INDENT + is); + } + } + } + + if (!errors) + logger.severe("no issues found."); + else + logger.severe("issues found (listed above)."); + + logger.info(""); + + return taskRes; + } + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + int checkFirst = -1; + int checkThrough = -1; + UUID nodeId = null; + Set caches = null; + + int argsCnt = 0; + + while 
(argIter.hasNextSubArg() && argsCnt++ < 4) { + String nextArg = argIter.nextArg(""); + + ValidateIndexesCommandArg arg = CommandArgUtils.of(nextArg, ValidateIndexesCommandArg.class); + + if (arg == CHECK_FIRST || arg == CHECK_THROUGH) { + if (!argIter.hasNextSubArg()) + throw new IllegalArgumentException("Numeric value for '" + nextArg + "' parameter expected."); + + int numVal; + + String numStr = argIter.nextArg(""); + + try { + numVal = Integer.parseInt(numStr); + } + catch (IllegalArgumentException e) { + throw new IllegalArgumentException( + "Not numeric value was passed for '" + nextArg + "' parameter: " + numStr + ); + } + + if (numVal <= 0) + throw new IllegalArgumentException("Value for '" + nextArg + "' property should be positive."); + + if (arg == CHECK_FIRST) + checkFirst = numVal; + else + checkThrough = numVal; + + continue; + } + + try { + nodeId = UUID.fromString(nextArg); + + continue; + } + catch (IllegalArgumentException ignored) { + //No-op. + } + + caches = argIter.parseStringSet(nextArg); + + if (F.constainsStringIgnoreCase(caches, UTILITY_CACHE_NAME)) { + throw new IllegalArgumentException( + VALIDATE_INDEXES + " not allowed for `" + UTILITY_CACHE_NAME + "` cache." + ); + } + } + + args = new Arguments(caches, nodeId, checkFirst, checkThrough); + } + + /** {@inheritDoc} */ + @Override public String name() { + return VALIDATE_INDEXES.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java new file mode 100644 index 0000000000000..55ed3740e3555 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/CacheViewer.java @@ -0,0 +1,444 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. 
See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Collection; +import java.util.LinkedHashMap; +import java.util.Map; +import java.util.UUID; +import java.util.logging.Logger; +import java.util.stream.Collectors; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.OutputFormat; +import org.apache.ignite.internal.commandline.TaskExecutor; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.cache.argument.ListCommandArg; +import org.apache.ignite.internal.processors.cache.verify.CacheInfo; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.SB; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.cache.VisorCacheAffinityConfiguration; +import org.apache.ignite.internal.visor.cache.VisorCacheConfiguration; +import 
org.apache.ignite.internal.visor.cache.VisorCacheConfigurationCollectorTask; +import org.apache.ignite.internal.visor.cache.VisorCacheConfigurationCollectorTaskArg; +import org.apache.ignite.internal.visor.cache.VisorCacheEvictionConfiguration; +import org.apache.ignite.internal.visor.cache.VisorCacheNearConfiguration; +import org.apache.ignite.internal.visor.cache.VisorCacheRebalanceConfiguration; +import org.apache.ignite.internal.visor.cache.VisorCacheStoreConfiguration; +import org.apache.ignite.internal.visor.query.VisorQueryConfiguration; +import org.apache.ignite.internal.visor.verify.VisorViewCacheCmd; +import org.apache.ignite.internal.visor.verify.VisorViewCacheTask; +import org.apache.ignite.internal.visor.verify.VisorViewCacheTaskArg; +import org.apache.ignite.internal.visor.verify.VisorViewCacheTaskResult; + +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommandLogger.or; +import static org.apache.ignite.internal.commandline.OutputFormat.MULTI_LINE; +import static org.apache.ignite.internal.commandline.OutputFormat.SINGLE_LINE; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.OP_NODE_ID; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.LIST; +import static org.apache.ignite.internal.commandline.cache.argument.ListCommandArg.CONFIG; +import static org.apache.ignite.internal.commandline.cache.argument.ListCommandArg.GROUP; +import static org.apache.ignite.internal.commandline.cache.argument.ListCommandArg.OUTPUT_FORMAT; +import static org.apache.ignite.internal.commandline.cache.argument.ListCommandArg.SEQUENCE; +import static org.apache.ignite.internal.visor.verify.VisorViewCacheCmd.CACHES; +import static 
org.apache.ignite.internal.visor.verify.VisorViewCacheCmd.GROUPS; +import static org.apache.ignite.internal.visor.verify.VisorViewCacheCmd.SEQ; + +/** + * Command to show caches on cluster. + */ +public class CacheViewer implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String description = "Show information about caches, groups or sequences that match a regular expression. " + + "When executed without parameters, this subcommand prints the list of caches."; + + Map map = U.newLinkedHashMap(16); + + map.put(CONFIG.toString(), "print all configuration parameters for each cache."); + map.put(OUTPUT_FORMAT + " " + MULTI_LINE, "print configuration parameters per line. This option has effect only " + + "when used with " + CONFIG + " and without " + optional(or(GROUP, SEQUENCE)) + "."); + map.put(GROUP.toString(), "print information about groups."); + map.put(SEQUENCE.toString(), "print information about sequences."); + + usageCache(logger, LIST, description, map, "regexPattern", + optional(or(GROUP, SEQUENCE)), OP_NODE_ID, optional(CONFIG), optional(OUTPUT_FORMAT, MULTI_LINE)); + } + + /** + * Container for command arguments. + */ + public static class Arguments { + /** Regex. */ + private String regex; + + /** Full config flag. */ + private boolean fullConfig; + + /** Node id. */ + private UUID nodeId; + + /** Cache view command. */ + private VisorViewCacheCmd cacheCmd; + + /** Output format. */ + private OutputFormat outputFormat; + + /** + * + */ + public Arguments(String regex, boolean fullConfig, UUID nodeId, VisorViewCacheCmd cacheCmd, OutputFormat outputFormat) { + this.regex = regex; + this.fullConfig = fullConfig; + this.nodeId = nodeId; + this.cacheCmd = cacheCmd; + this.outputFormat = outputFormat; + } + + /** + * @return Regex. + */ + public String regex() { + return regex; + } + + /** + * @return Node id. + */ + public UUID nodeId() { + return nodeId; + } + + /** + * @return Output format. 
+ */ + public OutputFormat outputFormat() { return outputFormat; } + + /** + * @return Cache view command. + */ + public VisorViewCacheCmd cacheCommand() { + return cacheCmd; + } + + /** + * @return Full config flag. + */ + public boolean fullConfig(){ return fullConfig; } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** Command parsed arguments */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + VisorViewCacheTaskArg taskArg = new VisorViewCacheTaskArg(args.regex(), args.cacheCommand()); + + VisorViewCacheTaskResult res; + + try (GridClient client = Command.startClient(clientCfg)) { + res = TaskExecutor.executeTaskByNameOnNode( + client, + VisorViewCacheTask.class.getName(), + taskArg, + args.nodeId(), + clientCfg + ); + + if (args.fullConfig() && args.cacheCommand() == CACHES) + cachesConfig(client, args, res, clientCfg, logger); + else + printCacheInfos(res.cacheInfos(), args.cacheCommand(), logger); + } + + + return res; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + String regex = argIter.nextArg("Regex is expected"); + boolean fullConfig = false; + VisorViewCacheCmd cacheCmd = CACHES; + OutputFormat outputFormat = SINGLE_LINE; + UUID nodeId = null; + + while (argIter.hasNextSubArg()) { + String nextArg = argIter.nextArg("").toLowerCase(); + + ListCommandArg arg = CommandArgUtils.of(nextArg, ListCommandArg.class); + + if (arg != null) { + switch (arg) { + case GROUP: + cacheCmd = GROUPS; + + break; + + case SEQUENCE: + cacheCmd = SEQ; + + break; + + case OUTPUT_FORMAT: + String tmp2 = argIter.nextArg("output format must be defined!").toLowerCase(); + + outputFormat = OutputFormat.fromConsoleName(tmp2); + + break; + + case CONFIG: + fullConfig = true; 
+ + break; + } + } + else + nodeId = UUID.fromString(nextArg); + } + + args = new Arguments(regex, fullConfig, nodeId, cacheCmd, outputFormat); + } + + /** + * Maps VisorCacheConfiguration to key-value pairs. + * + * @param cfg Visor cache configuration. + * @return map of key-value pairs. + */ + private static Map mapToPairs(VisorCacheConfiguration cfg) { + Map params = new LinkedHashMap<>(); + + VisorCacheAffinityConfiguration affinityCfg = cfg.getAffinityConfiguration(); + VisorCacheNearConfiguration nearCfg = cfg.getNearConfiguration(); + VisorCacheRebalanceConfiguration rebalanceCfg = cfg.getRebalanceConfiguration(); + VisorCacheEvictionConfiguration evictCfg = cfg.getEvictionConfiguration(); + VisorCacheStoreConfiguration storeCfg = cfg.getStoreConfiguration(); + VisorQueryConfiguration qryCfg = cfg.getQueryConfiguration(); + + params.put("Name", cfg.getName()); + params.put("Group", cfg.getGroupName()); + params.put("Dynamic Deployment ID", cfg.getDynamicDeploymentId()); + params.put("System", cfg.isSystem()); + + params.put("Mode", cfg.getMode()); + params.put("Atomicity Mode", cfg.getAtomicityMode()); + params.put("Statistic Enabled", cfg.isStatisticsEnabled()); + params.put("Management Enabled", cfg.isManagementEnabled()); + + params.put("On-heap cache enabled", cfg.isOnheapCacheEnabled()); + params.put("Partition Loss Policy", cfg.getPartitionLossPolicy()); + params.put("Query Parallelism", cfg.getQueryParallelism()); + params.put("Copy On Read", cfg.isCopyOnRead()); + params.put("Listener Configurations", cfg.getListenerConfigurations()); + params.put("Load Previous Value", cfg.isLoadPreviousValue()); + params.put("Memory Policy Name", cfg.getMemoryPolicyName()); + params.put("Node Filter", cfg.getNodeFilter()); + params.put("Read From Backup", cfg.isReadFromBackup()); + params.put("Topology Validator", cfg.getTopologyValidator()); + + params.put("Time To Live Eager Flag", cfg.isEagerTtl()); + + params.put("Write Synchronization Mode", 
cfg.getWriteSynchronizationMode()); + params.put("Invalidate", cfg.isInvalidate()); + + params.put("Affinity Function", affinityCfg.getFunction()); + params.put("Affinity Backups", affinityCfg.getPartitionedBackups()); + params.put("Affinity Partitions", affinityCfg.getPartitions()); + params.put("Affinity Exclude Neighbors", affinityCfg.isExcludeNeighbors()); + params.put("Affinity Mapper", affinityCfg.getMapper()); + + params.put("Rebalance Mode", rebalanceCfg.getMode()); + params.put("Rebalance Batch Size", rebalanceCfg.getBatchSize()); + params.put("Rebalance Timeout", rebalanceCfg.getTimeout()); + params.put("Rebalance Delay", rebalanceCfg.getPartitionedDelay()); + params.put("Time Between Rebalance Messages", rebalanceCfg.getThrottle()); + params.put("Rebalance Batches Count", rebalanceCfg.getBatchesPrefetchCnt()); + params.put("Rebalance Cache Order", rebalanceCfg.getRebalanceOrder()); + + params.put("Eviction Policy Enabled", (evictCfg.getPolicy() != null)); + params.put("Eviction Policy Factory", evictCfg.getPolicy()); + params.put("Eviction Policy Max Size", evictCfg.getPolicyMaxSize()); + params.put("Eviction Filter", evictCfg.getFilter()); + + params.put("Near Cache Enabled", nearCfg.isNearEnabled()); + params.put("Near Start Size", nearCfg.getNearStartSize()); + params.put("Near Eviction Policy Factory", nearCfg.getNearEvictPolicy()); + params.put("Near Eviction Policy Max Size", nearCfg.getNearEvictMaxSize()); + + params.put("Default Lock Timeout", cfg.getDefaultLockTimeout()); + params.put("Query Entities", cfg.getQueryEntities()); + params.put("Cache Interceptor", cfg.getInterceptor()); + + params.put("Store Enabled", storeCfg.isEnabled()); + params.put("Store Class", storeCfg.getStore()); + params.put("Store Factory Class", storeCfg.getStoreFactory()); + params.put("Store Keep Binary", storeCfg.isStoreKeepBinary()); + params.put("Store Read Through", storeCfg.isReadThrough()); + params.put("Store Write Through", storeCfg.isWriteThrough()); + 
params.put("Store Write Coalescing", storeCfg.getWriteBehindCoalescing()); + + params.put("Write-Behind Enabled", storeCfg.isWriteBehindEnabled()); + params.put("Write-Behind Flush Size", storeCfg.getFlushSize()); + params.put("Write-Behind Frequency", storeCfg.getFlushFrequency()); + params.put("Write-Behind Flush Threads Count", storeCfg.getFlushThreadCount()); + params.put("Write-Behind Batch Size", storeCfg.getBatchSize()); + + params.put("Concurrent Asynchronous Operations Number", cfg.getMaxConcurrentAsyncOperations()); + + params.put("Loader Factory Class Name", cfg.getLoaderFactory()); + params.put("Writer Factory Class Name", cfg.getWriterFactory()); + params.put("Expiry Policy Factory Class Name", cfg.getExpiryPolicyFactory()); + + params.put("Query Execution Time Threshold", qryCfg.getLongQueryWarningTimeout()); + params.put("Query Escaped Names", qryCfg.isSqlEscapeAll()); + params.put("Query SQL Schema", qryCfg.getSqlSchema()); + params.put("Query SQL functions", qryCfg.getSqlFunctionClasses()); + params.put("Query Indexed Types", qryCfg.getIndexedTypes()); + params.put("Maximum payload size for offheap indexes", cfg.getSqlIndexMaxInlineSize()); + params.put("Query Metrics History Size", cfg.getQueryDetailMetricsSize()); + + return params; + } + + /** + * Prints caches config. + * + * @param caches Caches config. + * @param outputFormat Output format. + * @param cacheToMapped Map cache name to mapped. 
+ */ + private void printCachesConfig( + Map caches, + OutputFormat outputFormat, + Map cacheToMapped, + Logger logger + ) { + + for (Map.Entry entry : caches.entrySet()) { + String cacheName = entry.getKey(); + + switch (outputFormat) { + case MULTI_LINE: + Map params = mapToPairs(entry.getValue()); + + params.put("Mapped", cacheToMapped.get(cacheName)); + + logger.info(String.format("[cache = '%s']%n", cacheName)); + + for (Map.Entry innerEntry : params.entrySet()) + logger.info(String.format("%s: %s%n", innerEntry.getKey(), innerEntry.getValue())); + + logger.info(""); + + break; + + default: + int mapped = cacheToMapped.get(cacheName); + + logger.info(String.format("%s: %s %s=%s%n", entry.getKey(), toString(entry.getValue()), "mapped", mapped)); + + break; + } + } + } + + /** + * Invokes toString() method and cuts class name from result string. + * + * @param cfg Visor cache configuration for invocation. + * @return String representation without class name in begin of string. + */ + private String toString(VisorCacheConfiguration cfg) { + return cfg.toString().substring(cfg.getClass().getSimpleName().length() + 1); + } + + /** + * @param client Client. + * @param cacheArgs Cache args. + * @param viewRes Cache view task result. + * @param clientCfg Client configuration. + */ + private void cachesConfig( + GridClient client, + Arguments cacheArgs, + VisorViewCacheTaskResult viewRes, + GridClientConfiguration clientCfg, + Logger logger + ) throws GridClientException { + VisorCacheConfigurationCollectorTaskArg taskArg = new VisorCacheConfigurationCollectorTaskArg(cacheArgs.regex()); + + UUID nodeId = cacheArgs.nodeId() == null ? 
TaskExecutor.BROADCAST_UUID : cacheArgs.nodeId(); + + Map res = + executeTaskByNameOnNode(client, VisorCacheConfigurationCollectorTask.class.getName(), taskArg, nodeId, clientCfg); + + Map cacheToMapped = + viewRes.cacheInfos().stream().collect(Collectors.toMap(CacheInfo::getCacheName, CacheInfo::getMapped)); + + printCachesConfig(res, cacheArgs.outputFormat(), cacheToMapped, logger); + } + + /** + * Prints caches info. + * + * @param infos Caches info. + * @param cmd Command. + */ + private void printCacheInfos(Collection infos, VisorViewCacheCmd cmd, Logger logger) { + for (CacheInfo info : infos) { + Map map = info.toMap(cmd); + + SB sb = new SB("["); + + for (Map.Entry e : map.entrySet()) + sb.a(e.getKey()).a("=").a(e.getValue()).a(", "); + + sb.setLength(sb.length() - 2); + + sb.a("]"); + + logger.info(sb.toString()); + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return LIST.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java new file mode 100644 index 0000000000000..5006f075cd009 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/FindAndDeleteGarbage.java @@ -0,0 +1,201 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. 
You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Collections; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.cache.argument.FindAndDeleteGarbageArg; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceJobResult; +import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTask; +import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskArg; +import org.apache.ignite.internal.visor.cache.VisorFindAndDeleteGarbageInPersistenceTaskResult; + +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTask; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.OP_NODE_ID; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static 
org.apache.ignite.internal.commandline.cache.CacheSubcommands.FIND_AND_DELETE_GARBAGE; + +/** + * Command to find and delete garbage which could left after destroying caches in shared group. + */ +public class FindAndDeleteGarbage implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String GROUPS = "groupName1,...,groupNameN"; + String description = "Find and optionally delete garbage from shared cache groups which could be left " + + "after cache destroy."; + + usageCache(logger, FIND_AND_DELETE_GARBAGE, description, null, + optional(GROUPS), OP_NODE_ID, optional(FindAndDeleteGarbageArg.DELETE)); + } + + /** + * Container for command arguments. + */ + public static class Arguments { + /** Groups. */ + private Set groups; + + /** Node id. */ + private UUID nodeId; + + /** Delete garbage flag. */ + private boolean delete; + + /** + * + */ + public Arguments(Set groups, UUID nodeId, boolean delete) { + this.groups = groups; + this.nodeId = nodeId; + this.delete = delete; + } + + /** + * @return Node id. + */ + public UUID nodeId() { + return nodeId; + } + + /** + * @return Cache group to scan for, null means scanning all groups. + */ + public Set groups() { + return groups; + } + + /** + * @return True if it is needed to delete found garbage. + */ + public boolean delete() { + return delete; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** Command parsed arguments. */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + VisorFindAndDeleteGarbageInPersistenceTaskArg taskArg = new VisorFindAndDeleteGarbageInPersistenceTaskArg( + args.groups(), + args.delete(), + args.nodeId() != null ? 
Collections.singleton(args.nodeId()) : null + ); + + try (GridClient client = Command.startClient(clientCfg)) { + VisorFindAndDeleteGarbageInPersistenceTaskResult taskRes = executeTask( + client, VisorFindAndDeleteGarbageInPersistenceTask.class, taskArg, clientCfg); + + CommandLogger.printErrors(taskRes.exceptions(), "Scanning for garbage failed on nodes:", logger); + + for (Map.Entry nodeEntry : taskRes.result().entrySet()) { + if (!nodeEntry.getValue().hasGarbage()) { + logger.info("Node " + nodeEntry.getKey() + " - garbage not found."); + + continue; + } + + logger.info("Garbage found on node " + nodeEntry.getKey() + ":"); + + VisorFindAndDeleteGarbageInPersistenceJobResult value = nodeEntry.getValue(); + + Map> grpPartErrorsCount = value.checkResult(); + + if (!grpPartErrorsCount.isEmpty()) { + for (Map.Entry> entry : grpPartErrorsCount.entrySet()) { + for (Map.Entry e : entry.getValue().entrySet()) { + logger.info(INDENT + "Group=" + entry.getKey() + + ", partition=" + e.getKey() + + ", count of keys=" + e.getValue()); + } + } + } + + logger.info(""); + } + + return taskRes; + } + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + boolean delete = false; + UUID nodeId = null; + Set groups = null; + + int argsCnt = 0; + + while (argIter.hasNextSubArg() && argsCnt++ < 3) { + String nextArg = argIter.nextArg(""); + + FindAndDeleteGarbageArg arg = CommandArgUtils.of(nextArg, FindAndDeleteGarbageArg.class); + + if (arg == FindAndDeleteGarbageArg.DELETE) { + delete = true; + + continue; + } + + try { + nodeId = UUID.fromString(nextArg); + + continue; + } + catch (IllegalArgumentException ignored) { + //No-op. 
+ } + + groups = argIter.parseStringSet(nextArg); + } + + args = new Arguments(groups, nodeId, delete); + } + + /** {@inheritDoc} */ + @Override public String name() { + return FIND_AND_DELETE_GARBAGE.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java new file mode 100644 index 0000000000000..63afa6d894f7b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/IdleVerify.java @@ -0,0 +1,430 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. 
+ * + * + */ + + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.function.Consumer; +import java.util.logging.Logger; +import java.util.regex.Pattern; +import java.util.regex.PatternSyntaxException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.IgniteNodeAttributes; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.client.GridClientException; +import org.apache.ignite.internal.client.GridClientNode; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg; +import org.apache.ignite.internal.processors.cache.verify.IdleVerifyResultV2; +import org.apache.ignite.internal.processors.cache.verify.PartitionHashRecord; +import org.apache.ignite.internal.processors.cache.verify.PartitionKey; +import org.apache.ignite.internal.processors.cache.verify.VerifyBackupPartitionsTaskV2; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.SB; +import org.apache.ignite.internal.visor.verify.CacheFilterEnum; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTask; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyDumpTaskArg; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTask; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskArg; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskResult; +import org.apache.ignite.internal.visor.verify.VisorIdleVerifyTaskV2; 
+import org.apache.ignite.lang.IgniteProductVersion; + +import static java.lang.String.format; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.CommandLogger.or; +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTask; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.IDLE_VERIFY; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.CACHE_FILTER; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.CHECK_CRC; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.DUMP; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.EXCLUDE_CACHES; +import static org.apache.ignite.internal.commandline.cache.argument.IdleVerifyCommandArg.SKIP_ZEROS; +import static org.apache.ignite.internal.processors.cache.GridCacheUtils.UTILITY_CACHE_NAME; +import static org.apache.ignite.internal.visor.verify.CacheFilterEnum.ALL; +import static org.apache.ignite.internal.visor.verify.CacheFilterEnum.NOT_PERSISTENT; +import static org.apache.ignite.internal.visor.verify.CacheFilterEnum.PERSISTENT; +import static org.apache.ignite.internal.visor.verify.CacheFilterEnum.SYSTEM; +import static org.apache.ignite.internal.visor.verify.CacheFilterEnum.USER; + +/** + * + */ +public class IdleVerify implements Command { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String CACHES = "cacheName1,...,cacheNameN"; + String description = "Verify counters and hash sums of primary and backup partitions for the specified caches/cache " + + "groups on an idle cluster and print out the differences, if any. When no parameters are specified, " + + "all user caches are verified. 
Cache filtering options configure the set of caches that will be " + + "processed by " + IDLE_VERIFY + " command. If cache names are specified, in form of regular " + + "expressions, only matching caches will be verified. Caches matched by regexes specified after " + + EXCLUDE_CACHES + " parameter will be excluded from verification. Using parameter " + CACHE_FILTER + + " you can verify: only " + USER + " caches, only user " + PERSISTENT + " caches, only user " + + NOT_PERSISTENT + " caches, only " + SYSTEM + " caches, or " + ALL + " of the above."; + + usageCache( + logger, + IDLE_VERIFY, + description, + Collections.singletonMap(CHECK_CRC.toString(), + "check the CRC-sum of pages stored on disk before verifying data " + + "consistency in partitions between primary and backup nodes."), + optional(DUMP), optional(SKIP_ZEROS), optional(CHECK_CRC), optional(EXCLUDE_CACHES, CACHES), + optional(CACHE_FILTER, or(ALL, USER, SYSTEM, PERSISTENT, NOT_PERSISTENT)), optional(CACHES)); + } + + /** + * Container for command arguments. + */ + public static class Arguments { + /** Caches. */ + private Set caches; + + /** Exclude caches or groups. */ + private Set excludeCaches; + + /** Calculate partition hash and print into standard output. */ + private boolean dump; + + /** Skip zeros partitions. */ + private boolean skipZeros; + + /** Check CRC sum on idle verify. */ + private boolean idleCheckCrc; + + /** Cache filter. */ + private CacheFilterEnum cacheFilterEnum; + + /** + * + */ + public Arguments(Set caches, Set excludeCaches, boolean dump, boolean skipZeros, + boolean idleCheckCrc, + CacheFilterEnum cacheFilterEnum) { + this.caches = caches; + this.excludeCaches = excludeCaches; + this.dump = dump; + this.skipZeros = skipZeros; + this.idleCheckCrc = idleCheckCrc; + this.cacheFilterEnum = cacheFilterEnum; + } + + /** + * @return Gets filter of caches, which will by checked. 
+ */ + public CacheFilterEnum getCacheFilterEnum() { + return cacheFilterEnum; + } + + /** + * @return Caches. + */ + public Set caches() { + return caches; + } + + /** + * @return Exclude caches or groups. + */ + public Set excludeCaches() { + return excludeCaches; + } + + /** + * @return Calculate partition hash and print into standard output. + */ + public boolean dump() { + return dump; + } + + /** + * @return Check page CRC sum on idle verify flag. + */ + public boolean idleCheckCrc() { + return idleCheckCrc; + } + + + /** + * @return Skip zeros partitions(size == 0) in result. + */ + public boolean isSkipZeros() { + return skipZeros; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + /** Command parsed arguments. */ + private Arguments args; + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return args; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + try (GridClient client = Command.startClient(clientCfg)) { + Collection nodes = client.compute().nodes(GridClientNode::connectable); + + boolean idleVerifyV2 = true; + + for (GridClientNode node : nodes) { + String nodeVerStr = node.attribute(IgniteNodeAttributes.ATTR_BUILD_VER); + + IgniteProductVersion nodeVer = IgniteProductVersion.fromString(nodeVerStr); + + if (nodeVer.compareTo(VerifyBackupPartitionsTaskV2.V2_SINCE_VER) < 0) { + idleVerifyV2 = false; + + break; + } + } + + if (args.dump()) + cacheIdleVerifyDump(client, clientCfg, logger); + else if (idleVerifyV2) + cacheIdleVerifyV2(client, clientCfg, logger); + else + legacyCacheIdleVerify(client, clientCfg, logger); + } + + return null; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + Set cacheNames = null; + boolean dump = false; + boolean skipZeros = false; + boolean idleCheckCrc = false; + CacheFilterEnum cacheFilterEnum = 
CacheFilterEnum.DEFAULT; + Set excludeCaches = null; + + int idleVerifyArgsCnt = 5; + + while (argIter.hasNextSubArg() && idleVerifyArgsCnt-- > 0) { + String nextArg = argIter.nextArg(""); + + IdleVerifyCommandArg arg = CommandArgUtils.of(nextArg, IdleVerifyCommandArg.class); + + if (arg == null) { + cacheNames = argIter.parseStringSet(nextArg); + + validateRegexes(cacheNames); + } + else { + switch (arg) { + case DUMP: + dump = true; + + break; + + case SKIP_ZEROS: + skipZeros = true; + + break; + + case CHECK_CRC: + idleCheckCrc = true; + + break; + + case CACHE_FILTER: + String filter = argIter.nextArg("The cache filter should be specified. The following " + + "values can be used: " + Arrays.toString(CacheFilterEnum.values()) + '.'); + + cacheFilterEnum = CacheFilterEnum.valueOf(filter.toUpperCase()); + + break; + + case EXCLUDE_CACHES: + excludeCaches = argIter.nextStringSet("caches, which will be excluded."); + + validateRegexes(excludeCaches); + + break; + } + } + } + + if (idleCheckCrc) { + if (cacheFilterEnum == ALL || cacheFilterEnum == SYSTEM) { + throw new IllegalArgumentException( + IDLE_VERIFY + " with " + CHECK_CRC + " and " + CACHE_FILTER + " " + ALL + " or " + SYSTEM + + " not allowed. You should remove " + CHECK_CRC + " or change " + CACHE_FILTER + " value." + ); + } + + if (F.constainsStringIgnoreCase(cacheNames, UTILITY_CACHE_NAME)) { + throw new IllegalArgumentException( + IDLE_VERIFY + " with " + CHECK_CRC + " not allowed for `" + UTILITY_CACHE_NAME + "` cache." + ); + } + } + + args = new Arguments(cacheNames, excludeCaches, dump, skipZeros, idleCheckCrc, cacheFilterEnum); + } + + /** + * @param string To validate that given name is valed regex. + */ + private void validateRegexes(Set string) { + string.forEach(s -> { + try { + Pattern.compile(s); + } + catch (PatternSyntaxException e) { + throw new IgniteException(format("Invalid cache name regexp '%s': %s", s, e.getMessage())); + } + }); + } + + /** + * @param client Client. 
+ * @param clientCfg Client configuration. + */ + private void cacheIdleVerifyDump( + GridClient client, + GridClientConfiguration clientCfg, + Logger logger + ) throws GridClientException { + VisorIdleVerifyDumpTaskArg arg = new VisorIdleVerifyDumpTaskArg( + args.caches(), + args.excludeCaches(), + args.isSkipZeros(), + args.getCacheFilterEnum(), + args.idleCheckCrc() + ); + + String path = executeTask(client, VisorIdleVerifyDumpTask.class, arg, clientCfg); + + logParsedArgs(arg, logger::info); + + logger.info("VisorIdleVerifyDumpTask successfully written output to '" + path + "'"); + } + + + /** + * @param client Client. + * @param clientCfg Client configuration. + */ + private void cacheIdleVerifyV2( + GridClient client, + GridClientConfiguration clientCfg, + Logger log + ) throws GridClientException { + VisorIdleVerifyTaskArg taskArg = new VisorIdleVerifyTaskArg( + args.caches(), + args.excludeCaches(), + args.isSkipZeros(), + args.getCacheFilterEnum(), + args.idleCheckCrc() + ); + + IdleVerifyResultV2 res = executeTask(client, VisorIdleVerifyTaskV2.class, taskArg, clientCfg); + + logParsedArgs(taskArg, log::info); + + res.print(log::info); + } + + /** + * Passes idle_verify parsed arguments to given log consumer. + * + * @param args idle_verify arguments. + * @param logConsumer Logger. + */ + public static void logParsedArgs(VisorIdleVerifyTaskArg args, Consumer logConsumer) { + SB options = new SB("idle_verify task was executed with the following args: "); + + options + .a("caches=[") + .a(args.caches() == null ? "" : String.join(", ", args.caches())) + .a("], excluded=[") + .a(args.excludeCaches() == null ? "" : String.join(", ", args.excludeCaches())) + .a("]") + .a(", cacheFilter=[") + .a(args.cacheFilterEnum().toString()) + .a("]\n"); + + logConsumer.accept(options.toString()); + } + + /** + * @param client Client. + * @param clientCfg Client configuration. 
+ */ + private void legacyCacheIdleVerify( + GridClient client, + GridClientConfiguration clientCfg, + Logger logger + ) throws GridClientException { + VisorIdleVerifyTaskResult res = executeTask( + client, + VisorIdleVerifyTask.class, + new VisorIdleVerifyTaskArg( + args.caches(), + args.excludeCaches(), + args.isSkipZeros(), + args.getCacheFilterEnum(), + args.idleCheckCrc() + ), + clientCfg); + + Map> conflicts = res.getConflicts(); + + if (conflicts.isEmpty()) { + logger.info("idle_verify check has finished, no conflicts have been found."); + logger.info(""); + } + else { + logger.info("idle_verify check has finished, found " + conflicts.size() + " conflict partitions."); + logger.info(""); + + for (Map.Entry> entry : conflicts.entrySet()) { + logger.info("Conflict partition: " + entry.getKey()); + + logger.info("Partition instances: " + entry.getValue()); + } + } + } + + /** {@inheritDoc} */ + @Override public String name() { + return IDLE_VERIFY.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java new file mode 100644 index 0000000000000..ad43cb23f4b31 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/ResetLostPartitions.java @@ -0,0 +1,82 @@ +/* + * + * * Licensed to the Apache Software Foundation (ASF) under one or more + * * contributor license agreements. See the NOTICE file distributed with + * * this work for additional information regarding copyright ownership. + * * The ASF licenses this file to You under the Apache License, Version 2.0 + * * (the "License"); you may not use this file except in compliance with + * * the License. 
You may obtain a copy of the License at + * * + * * http://www.apache.org/licenses/LICENSE-2.0 + * * + * * Unless required by applicable law or agreed to in writing, software + * * distributed under the License is distributed on an "AS IS" BASIS, + * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * * See the License for the specific language governing permissions and + * * limitations under the License. + * + * + */ + +package org.apache.ignite.internal.commandline.cache; + +import java.util.Set; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTask; +import org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskArg; +import org.apache.ignite.internal.commandline.cache.reset_lost_partitions.CacheResetLostPartitionsTaskResult; + +import static org.apache.ignite.internal.commandline.TaskExecutor.executeTaskByNameOnNode; +import static org.apache.ignite.internal.commandline.cache.CacheCommands.usageCache; +import static org.apache.ignite.internal.commandline.cache.CacheSubcommands.RESET_LOST_PARTITIONS; + +/** + * Command for reseting lost partition state. + */ +public class ResetLostPartitions implements Command> { + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + String CACHES = "cacheName1,...,cacheNameN"; + String description = "Reset the state of lost partitions for the specified caches."; + + usageCache(logger, RESET_LOST_PARTITIONS, description, null, CACHES); + } + + /** + * Command argument. Caches which lost partitions should be reseted. 
+ */ + private Set caches; + + /** {@inheritDoc} */ + @Override public Set arg() { + return caches; + } + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + CacheResetLostPartitionsTaskArg taskArg = new CacheResetLostPartitionsTaskArg(caches); + + try (GridClient client = Command.startClient(clientCfg)) { + CacheResetLostPartitionsTaskResult res = + executeTaskByNameOnNode(client, CacheResetLostPartitionsTask.class.getName(), taskArg, null, clientCfg); + + res.print(System.out); + + return res; + } + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + caches = argIter.nextStringSet("Cache names"); + } + + /** {@inheritDoc} */ + @Override public String name() { + return RESET_LOST_PARTITIONS.text().toUpperCase(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/DistributionCommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/DistributionCommandArg.java new file mode 100644 index 0000000000000..661ba9eab4894 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/DistributionCommandArg.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.argument; + +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.cache.CacheSubcommands; + +/** + * {@link CacheSubcommands#DISTRIBUTION} command arguments. + */ +public enum DistributionCommandArg implements CommandArg { + /** User attributes. */ + USER_ATTRIBUTES("--user-attributes"); + + /** Option name. */ + private final String name; + + /** */ + DistributionCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/FindAndDeleteGarbageArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/FindAndDeleteGarbageArg.java new file mode 100644 index 0000000000000..264a300f7ef9c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/FindAndDeleteGarbageArg.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.argument; + +import org.apache.ignite.internal.commandline.argument.CommandArg; + +/** + * List of extra arguments. + */ +public enum FindAndDeleteGarbageArg implements CommandArg { + DELETE("--delete"); + + /** Argument name. */ + private final String name; + + /** + * @param name Argument name. + */ + FindAndDeleteGarbageArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/IdleVerifyCommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/IdleVerifyCommandArg.java new file mode 100644 index 0000000000000..6c0c034351326 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/IdleVerifyCommandArg.java @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.cache.argument; + +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.cache.CacheSubcommands; + +/** + * {@link CacheSubcommands#IDLE_VERIFY} command arguments. + */ +public enum IdleVerifyCommandArg implements CommandArg { + /** Dump. */ + DUMP("--dump"), + + /** Skip zeros. */ + SKIP_ZEROS("--skip-zeros"), + + /** Exclude caches. */ + EXCLUDE_CACHES("--exclude-caches"), + + /** Check crc. */ + CHECK_CRC("--check-crc"), + + /** Cache filter. */ + CACHE_FILTER("--cache-filter"); + + /** Option name. */ + private final String name; + + /** */ + IdleVerifyCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ListCommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ListCommandArg.java new file mode 100644 index 0000000000000..2ba4857bad692 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ListCommandArg.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.argument; + +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.cache.CacheSubcommands; + +/** + * {@link CacheSubcommands#LIST} command arguments. + */ +public enum ListCommandArg implements CommandArg { + /** Group. */ + GROUP("--groups"), + + /** Sequence. */ + SEQUENCE("--seq"), + + /** Output format. */ + OUTPUT_FORMAT("--output-format"), + + /** Config. */ + CONFIG("--config"); + + /** Option name. */ + private final String name; + + /** */ + ListCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ValidateIndexesCommandArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ValidateIndexesCommandArg.java new file mode 100644 index 0000000000000..723b901381ad3 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/argument/ValidateIndexesCommandArg.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.argument; + +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.cache.CacheSubcommands; + +/** + * {@link CacheSubcommands#VALIDATE_INDEXES} command arguments. + */ +public enum ValidateIndexesCommandArg implements CommandArg { + /** Check first. */ + CHECK_FIRST("--check-first"), + + /** Check through. */ + CHECK_THROUGH("--check-through"); + + /** Option name. */ + private final String name; + + /** */ + ValidateIndexesCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionGroup.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionGroup.java new file mode 100644 index 0000000000000..3e594ff12bf6e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionGroup.java @@ -0,0 +1,102 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * DTO for CacheDistributionTask, contains information about group + */ +public class CacheDistributionGroup extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Group identifier. */ + private int grpId; + + /** Group name. */ + private String grpName; + + /** List of partitions. */ + private List partitions; + + /** Default constructor. */ + public CacheDistributionGroup() { + } + + /** + * @param grpId Group identifier. + * @param grpName Group name. + * @param partitions List of partitions. 
+ */ + public CacheDistributionGroup(int grpId, String grpName, List partitions) { + this.grpId = grpId; + this.grpName = grpName; + this.partitions = partitions; + } + + /** */ + public int getGroupId() { + return grpId; + } + + /** */ + public void setGroupId(int grpId) { + this.grpId = grpId; + } + + /** */ + public String getGroupName() { + return grpName; + } + + /** */ + public void setGroupName(String grpName) { + this.grpName = grpName; + } + + /** */ + public List getPartitions() { + return partitions; + } + + /** */ + public void setPartitions( + List partitions) { + this.partitions = partitions; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeInt(grpId); + U.writeString(out, grpName); + U.writeCollection(out, partitions); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, + ObjectInput in) throws IOException, ClassNotFoundException { + grpId = in.readInt(); + grpName = U.readString(in); + partitions = U.readList(in); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionNode.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionNode.java new file mode 100644 index 0000000000000..f53bf34fe877f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionNode.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * DTO for CacheDistributionTask, contains information about node + */ +public class CacheDistributionNode extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Node identifier. */ + private UUID nodeId; + + /** Network addresses. */ + private String addrs; + + /** User attribute in result. */ + private Map userAttrs; + + /** Information about groups. */ + private List groups; + + /** Default constructor. */ + public CacheDistributionNode() { + } + + /** + * @param nodeId Node identifier. + * @param addrs Network addresses. + * @param userAttrs Map node user attribute. + * @param groups Information about groups. + */ + public CacheDistributionNode(UUID nodeId, String addrs, + Map userAttrs, + List groups) { + this.nodeId = nodeId; + this.addrs = addrs; + this.userAttrs = userAttrs; + this.groups = groups; + } + + /** */ + public UUID getNodeId() { + return nodeId; + } + + /** */ + public void setNodeId(UUID nodeId) { + this.nodeId = nodeId; + } + + /** */ + public String getAddresses() { + return addrs; + } + + /** */ + public void setAddresses(String addrs) { + this.addrs = addrs; + } + + /** + * @return User attribute in result. 
+ */ + public Map getUserAttributes() { + return userAttrs; + } + + /** + * @param userAttrs New user attribute in result. + */ + public void setUserAttributes(Map userAttrs) { + this.userAttrs = userAttrs; + } + + /** */ + public List getGroups() { + return groups; + } + + /** */ + public void setGroups(List groups) { + this.groups = groups; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeUuid(out, nodeId); + U.writeString(out, addrs); + U.writeMap(out, userAttrs); + U.writeCollection(out, groups); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, + ObjectInput in) throws IOException, ClassNotFoundException { + nodeId = U.readUuid(in); + addrs = U.readString(in); + userAttrs = U.readMap(in); + groups = U.readList(in); + } + +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionPartition.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionPartition.java new file mode 100644 index 0000000000000..e0eea60c54e70 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionPartition.java @@ -0,0 +1,136 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * DTO for CacheDistributionTask, contains information about partition + */ +public class CacheDistributionPartition extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Partition identifier. */ + private int partId; + + /** Flag primary or backup partition. */ + private boolean primary; + + /** Partition status. */ + private GridDhtPartitionState state; + + /** Partition update counters. */ + private long updateCntr; + + /** Number of entries in partition. */ + private long size; + + /** Default constructor. */ + public CacheDistributionPartition() { + } + + /** + * @param partId Partition identifier. + * @param primary Flag primary or backup partition. + * @param state Partition status. + * @param updateCntr Partition update counters. + * @param size Number of entries in partition. 
+ */ + public CacheDistributionPartition(int partId, boolean primary, + GridDhtPartitionState state, long updateCntr, long size) { + this.partId = partId; + this.primary = primary; + this.state = state; + this.updateCntr = updateCntr; + this.size = size; + } + + /** */ + public int getPartition() { + return partId; + } + + /** */ + public void setPartition(int partId) { + this.partId = partId; + } + + /** */ + public boolean isPrimary() { + return primary; + } + + /** */ + public void setPrimary(boolean primary) { + this.primary = primary; + } + + /** */ + public GridDhtPartitionState getState() { + return state; + } + + /** */ + public void setState(GridDhtPartitionState state) { + this.state = state; + } + + /** */ + public long getUpdateCounter() { + return updateCntr; + } + + /** */ + public void setUpdateCounter(long updateCntr) { + this.updateCntr = updateCntr; + } + + /** */ + public long getSize() { + return size; + } + + /** */ + public void setSize(long size) { + this.size = size; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeInt(partId); + out.writeBoolean(primary); + U.writeEnum(out, state); + out.writeLong(updateCntr); + out.writeLong(size); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException { + partId = in.readInt(); + primary = in.readBoolean(); + state = GridDhtPartitionState.fromOrdinal(in.readByte()); + updateCntr = in.readLong(); + size = in.readLong(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTask.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTask.java new file mode 100644 index 0000000000000..88249dc487aae --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTask.java @@ -0,0 +1,171 @@ 
+/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.TreeMap; +import java.util.UUID; +import org.apache.ignite.IgniteException; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.internal.processors.affinity.AffinityAssignment; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.DynamicCacheDescriptor; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopologyImpl; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorMultiNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * Collect information on 
the distribution of partitions. + */ +public class CacheDistributionTask extends VisorMultiNodeTask { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Nullable @Override protected CacheDistributionTaskResult reduce0( + List list) throws IgniteException { + Map exceptions = new HashMap<>(); + List infos = new ArrayList<>(); + + for (ComputeJobResult res : list) { + if (res.getException() != null) + exceptions.put(res.getNode().id(), res.getException()); + else + infos.add(res.getData()); + } + + return new CacheDistributionTaskResult(infos, exceptions); + } + + /** {@inheritDoc} */ + @Override protected VisorJob job(CacheDistributionTaskArg arg) { + return new CacheDistributionJob(arg, debug); + } + + /** Job for node. */ + private static class CacheDistributionJob extends VisorJob { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Argument. + * @param debug Debug. + */ + public CacheDistributionJob(@Nullable CacheDistributionTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override public CacheDistributionNode run(CacheDistributionTaskArg arg) throws IgniteException { + try { + final CacheDistributionNode info = new CacheDistributionNode(); + + final ClusterNode node = ignite.localNode(); + + info.setNodeId(node.id()); + info.setAddresses(node.addresses().toString()); + + if (arg.getUserAttributes() != null) { + info.setUserAttributes(new TreeMap<>()); + + for (String userAttribute : arg.getUserAttributes()) + info.getUserAttributes().put(userAttribute, (String)node.attributes().get(userAttribute)); + } + + info.setGroups(new ArrayList<>()); + + Set grpIds = new HashSet<>(); + + if (arg.getCaches() == null) { + final Collection ctxs = ignite.context().cache().cacheGroups(); + + for (CacheGroupContext ctx : ctxs) + grpIds.add(ctx.groupId()); + } + else { + for (String cacheName : arg.getCaches()) + grpIds.add(CU.cacheId(cacheName)); + } + + if 
(grpIds.isEmpty()) + return info; + + for (Integer id : grpIds) { + final CacheDistributionGroup grp = new CacheDistributionGroup(); + + info.getGroups().add(grp); + + grp.setGroupId(id); + + final DynamicCacheDescriptor desc = ignite.context().cache().cacheDescriptor(id); + + final CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(desc == null ? id : desc.groupId()); + + grp.setGroupName(grpCtx.cacheOrGroupName()); + + grp.setPartitions(new ArrayList<>()); + + GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)grpCtx.topology(); + + final AffinityAssignment assignment = grpCtx.affinity().readyAffinity(top.readyTopologyVersion()); + + List locParts = top.localPartitions(); + + for (int i = 0; i < locParts.size(); i++) { + GridDhtLocalPartition part = locParts.get(i); + + if (part == null) + continue; + + final CacheDistributionPartition partDto = new CacheDistributionPartition(); + + grp.getPartitions().add(partDto); + + int p = part.id(); + partDto.setPartition(p); + partDto.setPrimary(assignment.primaryPartitions(node.id()).contains(p)); + partDto.setState(part.state()); + partDto.setUpdateCounter(part.updateCounter()); + partDto.setSize(desc == null ? 
part.dataStore().fullSize() : part.dataStore().cacheSize(id)); + } + } + return info; + } + catch (Exception e) { + throw new IgniteException(e); + } + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheDistributionJob.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskArg.java new file mode 100644 index 0000000000000..8e9d7235bae0f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskArg.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Set; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Input params for CacheDistributionTask + */ +public class CacheDistributionTaskArg extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Caches. */ + private Set caches; + + /** Add user attribute in result. */ + private Set userAttrs; + + /** + * Default constructor. + */ + public CacheDistributionTaskArg() { + // No-op. + } + + /** + * @param caches Caches. + * @param userAttrs Add user attribute in result. + */ + public CacheDistributionTaskArg(Set caches, Set userAttrs) { + this.caches = caches; + this.userAttrs = userAttrs; + } + + /** + * @return Caches. + */ + public Set getCaches() { + return caches; + } + + /** + * @return Add user attribute in result + */ + public Set getUserAttributes() { + return userAttrs; + } + + /** + * @param userAttrs New add user attribute in result + */ + public void setUserAttributes(Set userAttrs) { + this.userAttrs = userAttrs; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeCollection(out, caches); + U.writeCollection(out, userAttrs); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, + ObjectInput in) throws IOException, ClassNotFoundException { + caches = U.readSet(in); + userAttrs = U.readSet(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheDistributionTaskArg.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskResult.java 
b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskResult.java new file mode 100644 index 0000000000000..d50817f5beee6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/distribution/CacheDistributionTaskResult.java @@ -0,0 +1,349 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.cache.distribution; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.UUID; +import java.util.function.Consumer; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; +import org.jetbrains.annotations.NotNull; + +/** + * Result of CacheDistributionTask + */ +public class CacheDistributionTaskResult extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Job results. */ + private List nodeResList; + + /** Exceptions. */ + private Map exceptions; + + /** + * @param nodeResList Cluster infos. + * @param exceptions Exceptions. + */ + public CacheDistributionTaskResult(List nodeResList, + Map exceptions) { + this.nodeResList = nodeResList; + this.exceptions = exceptions; + } + + /** + * For externalization only. + */ + public CacheDistributionTaskResult() { + } + + /** + * @return Job results. + */ + public Collection jobResults() { + return nodeResList; + } + + /** + * @return Exceptions. 
+ */ + public Map exceptions() { + return exceptions; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeCollection(out, nodeResList); + U.writeMap(out, exceptions); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in + ) throws IOException, ClassNotFoundException { + nodeResList = U.readList(in); + exceptions = U.readMap(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheDistributionTaskResult.class, this); + } + + /** + * Print collect information on the distribution of partitions. + * + * @param printer Line printer. + */ + public void print(Consumer printer) { + if (nodeResList.isEmpty()) + return; + + List rows = new ArrayList<>(); + + for (CacheDistributionNode node : nodeResList) { + for (CacheDistributionGroup group : node.getGroups()) { + for (CacheDistributionPartition partition : group.getPartitions()) { + final Row row = new Row(); + row.setGroupId(group.getGroupId()); + row.setGroupName(group.getGroupName()); + row.setPartition(partition.getPartition()); + row.setNodeId(node.getNodeId()); + row.setPrimary(partition.isPrimary()); + row.setState(partition.getState()); + row.setUpdateCounter(partition.getUpdateCounter()); + row.setSize(partition.getSize()); + row.setAddresses(node.getAddresses()); + row.setUserAttributes(node.getUserAttributes()); + + rows.add(row); + } + } + } + + rows.sort(null); + + StringBuilder userAttrsName = new StringBuilder(); + if (!rows.isEmpty() && rows.get(0).userAttrs != null) { + for (String userAttribute : rows.get(0).userAttrs.keySet()) { + userAttrsName.append(','); + + if (userAttribute != null) + userAttrsName.append(userAttribute); + } + } + + printer.accept("[groupId,partition,nodeId,primary,state,updateCounter,partitionSize,nodeAddresses" + userAttrsName + "]"); + + int oldGrpId = 0; + + for (Row row : rows) { + if (oldGrpId != row.grpId) { + 
printer.accept("[next group: id=" + row.grpId + ", name=" + row.grpName + ']'); + + oldGrpId = row.getGroupId(); + } + + row.print(printer); + } + } + + /** + * Class for + */ + private static class Row implements Comparable { + /** */ + private int grpId; + + /** */ + private String grpName; + + /** */ + private int partId; + + /** */ + private UUID nodeId; + + /** */ + private boolean primary; + + /** */ + private GridDhtPartitionState state; + + /** */ + private long updateCntr; + + /** */ + private long size; + + /** */ + private String addrs; + + /** User attribute in result. */ + private Map userAttrs; + + /** */ + public int getGroupId() { + return grpId; + } + + /** */ + public void setGroupId(int grpId) { + this.grpId = grpId; + } + + /** */ + public String getGroupName() { + return grpName; + } + + /** */ + public void setGroupName(String grpName) { + this.grpName = grpName; + } + + /** */ + public int getPartition() { + return partId; + } + + /** */ + public void setPartition(int partId) { + this.partId = partId; + } + + /** */ + public UUID getNodeId() { + return nodeId; + } + + /** */ + public void setNodeId(UUID nodeId) { + this.nodeId = nodeId; + } + + /** */ + public boolean isPrimary() { + return primary; + } + + /** */ + public void setPrimary(boolean primary) { + this.primary = primary; + } + + /** */ + public GridDhtPartitionState getState() { + return state; + } + + /** */ + public void setState(GridDhtPartitionState state) { + this.state = state; + } + + /** */ + public long getUpdateCounter() { + return updateCntr; + } + + /** */ + public void setUpdateCounter(long updateCntr) { + this.updateCntr = updateCntr; + } + + /** */ + public long getSize() { + return size; + } + + /** */ + public void setSize(long size) { + this.size = size; + } + + /** */ + public String getAddresses() { + return addrs; + } + + /** */ + public void setAddresses(String addrs) { + this.addrs = addrs; + } + + /** + * @return User attribute in result. 
+ */ + public Map getUserAttributes() { + return userAttrs; + } + + /** + * @param userAttrs New user attribute in result. + */ + public void setUserAttributes(Map userAttrs) { + this.userAttrs = userAttrs; + } + + /** {@inheritDoc} */ + @Override public int compareTo(@NotNull Object o) { + assert o instanceof Row; + + Row other = (Row)o; + + int res = Integer.compare(grpId, other.grpId); + + if (res == 0) { + res = Integer.compare(partId, other.partId); + + if (res == 0) + res = nodeId.compareTo(other.nodeId); + + } + + return res; + } + + /** */ + public void print(Consumer printer) { + StringBuilder sb = new StringBuilder(); + + sb.append(grpId); + sb.append(','); + + sb.append(partId); + sb.append(','); + + sb.append(U.id8(getNodeId())); + sb.append(','); + + sb.append(primary ? "P" : "B"); + sb.append(','); + + sb.append(state); + sb.append(','); + + sb.append(updateCntr); + sb.append(','); + + sb.append(size); + sb.append(','); + + sb.append(addrs); + + if (userAttrs != null) { + for (String userAttribute : userAttrs.values()) { + sb.append(','); + if (userAttribute != null) + sb.append(userAttribute); + } + } + + printer.accept(sb.toString()); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTask.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTask.java new file mode 100644 index 0000000000000..2230a24dc4a0b --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTask.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.commandline.cache.reset_lost_partitions; + +import java.util.HashMap; +import java.util.SortedSet; +import java.util.TreeSet; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.processors.cache.CacheGroupContext; +import org.apache.ignite.internal.processors.cache.GridCacheContext; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.VisorJob; +import org.apache.ignite.internal.visor.VisorOneNodeTask; +import org.jetbrains.annotations.Nullable; + +/** + * Reset status of lost partitions. + */ +public class CacheResetLostPartitionsTask extends VisorOneNodeTask { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected VisorJob job( + CacheResetLostPartitionsTaskArg arg) { + return new CacheResetLostPartitionsJob(arg, debug); + } + + /** Job for node. */ + private static class CacheResetLostPartitionsJob extends VisorJob { + /** */ + private static final long serialVersionUID = 0L; + + /** + * @param arg Argument. + * @param debug Debug. 
+ */ + public CacheResetLostPartitionsJob(@Nullable CacheResetLostPartitionsTaskArg arg, boolean debug) { + super(arg, debug); + } + + /** {@inheritDoc} */ + @Override public CacheResetLostPartitionsTaskResult run( + CacheResetLostPartitionsTaskArg arg) throws IgniteException { + try { + final CacheResetLostPartitionsTaskResult res = new CacheResetLostPartitionsTaskResult(); + res.setMessageMap(new HashMap<>()); + + if (!F.isEmpty(arg.getCaches())) { + for (String groupName : arg.getCaches()) { + final int grpId = CU.cacheId(groupName); + + CacheGroupContext grp = ignite.context().cache().cacheGroup(grpId); + + if (grp != null) { + SortedSet cacheNames = grp.caches().stream() + .map(GridCacheContext::name) + .collect(Collectors.toCollection(TreeSet::new)); + + if (!F.isEmpty(cacheNames)) { + ignite.resetLostPartitions(cacheNames); + + res.put(groupName, String.format("Reset LOST-partitions performed successfully. " + + "Cache group (name = '%s', id = %d), caches (%s).", + groupName, grpId, cacheNames)); + } + } + else + res.put(groupName, String.format("Cache group (name = '%s', id = %d) not found.", + groupName, grpId)); + } + } + + return res; + } + catch (Exception e) { + throw new IgniteException(e); + } + + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheResetLostPartitionsJob.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskArg.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskArg.java new file mode 100644 index 0000000000000..2525ec40e6117 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskArg.java @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.commandline.cache.reset_lost_partitions; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Set; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Input params for CacheResetLostPartitionsTask + */ +public class CacheResetLostPartitionsTaskArg extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** Caches. */ + private Set caches; + + /** + * Default constructor. + */ + public CacheResetLostPartitionsTaskArg() { + // No-op. + } + + /** + * @param caches Caches. + */ + public CacheResetLostPartitionsTaskArg(Set caches) { + this.caches = caches; + } + + /** + * @return Caches. 
+ */ + public Set getCaches() { + return caches; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeCollection(out, caches); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte ver, ObjectInput in) throws IOException, ClassNotFoundException { + caches = U.readSet(in); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheResetLostPartitionsTaskArg.class, this); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskResult.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskResult.java new file mode 100644 index 0000000000000..7eb1f31d52951 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/cache/reset_lost_partitions/CacheResetLostPartitionsTaskResult.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.ignite.internal.commandline.cache.reset_lost_partitions; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.io.PrintStream; +import java.util.Map; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.visor.VisorDataTransferObject; + +/** + * Result of CacheResetLostPartitionsTask + */ +public class CacheResetLostPartitionsTaskResult extends VisorDataTransferObject { + /** */ + private static final long serialVersionUID = 0L; + + /** + * Map group name to result execute message. + */ + private Map msgMap; + + /** + * @param groupName - Cache group name. + * @param message - Job result message. + * @return the previous value associated with key, or null + */ + public String put(String groupName, String message) { + return this.msgMap.put(groupName, message); + } + + /** + * Print job result. + * + * @param out Print stream. + */ + public void print(PrintStream out) { + if (msgMap == null || msgMap.isEmpty()) + return; + + for (String message : msgMap.values()) + out.println(message); + } + + /** */ + public Map getMessageMap() { + return msgMap; + } + + /** */ + public void setMessageMap(Map messageMap) { + this.msgMap = messageMap; + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + U.writeMap(out, msgMap); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + msgMap = U.readMap(in); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java new file mode 100644 index 0000000000000..fee08801d30a1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticCommand.java @@ -0,0 +1,125 @@ 
+/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.diagnostic; + +import java.util.Arrays; +import java.util.logging.Logger; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; + +import static org.apache.ignite.internal.commandline.Command.usage; +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; +import static org.apache.ignite.internal.commandline.CommandList.DIAGNOSTIC; +import static org.apache.ignite.internal.commandline.CommandLogger.INDENT; +import static org.apache.ignite.internal.commandline.CommandLogger.join; +import static org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand.HELP; +import static org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand.PAGE_LOCKS; + +/** + * + */ +public class DiagnosticCommand implements Command { + /** */ + private DiagnosticSubCommand subcommand; + + /** */ + private Logger logger; + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + this.logger = 
logger; + + if (subcommand == HELP) { + printDiagnosticHelp(logger); + + return null; + } + + Command command = subcommand.subcommand(); + + if (command == null) + throw new IllegalStateException("Unknown command " + subcommand); + + return command.execute(clientCfg, logger); + } + + /** {@inheritDoc} */ + @Override public DiagnosticSubCommand arg() { + return subcommand; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + if (!argIter.hasNextSubArg()) { + subcommand = HELP; + + return; + } + + String str = argIter.nextArg("").toLowerCase(); + + DiagnosticSubCommand cmd = DiagnosticSubCommand.of(str); + + if (cmd == null) + cmd = HELP; + + switch (cmd) { + case HELP: + break; + + case PAGE_LOCKS: + cmd.subcommand().parseArguments(argIter); + + break; + + default: + throw new IllegalArgumentException("Unknown diagnostic subcommand " + cmd); + } + + if (argIter.hasNextSubArg()) + throw new IllegalArgumentException("Unexpected argument of diagnostic subcommand: " + argIter.peekNextArg()); + + subcommand = cmd; + } + + /** {@inheritDoc} */ + @Override public String name() { + return "diagnostic"; + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + usage(logger, "View diagnostic information in a cluster:", DIAGNOSTIC); + } + + /** + * Print diagnostic command help. 
+ */ + private void printDiagnosticHelp(Logger logger) { + logger.info(INDENT + join(" ", UTILITY_NAME, DIAGNOSTIC, PAGE_LOCKS + " - dump page locks info.")); + + logger.info(INDENT + "Subcommands:"); + + Arrays.stream(DiagnosticSubCommand.values()).forEach(c -> { + if (c.subcommand() != null) c.subcommand().printUsage(logger); + }); + + logger.info(""); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticSubCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticSubCommand.java new file mode 100644 index 0000000000000..2229fde70128c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/DiagnosticSubCommand.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.commandline.diagnostic; + +import org.apache.ignite.internal.commandline.Command; + +/** + * + */ +public enum DiagnosticSubCommand { + /** */ + HELP("help", null), + + /** */ + PAGE_LOCKS("pageLocks", new PageLocksCommand()); + + /** Diagnostic command name. */ + private final String name; + + /** Command instance for certain type. 
*/ + private final Command command; + + /** + * @param name Command name. + * @param command Command handler. + */ + DiagnosticSubCommand( + String name, + Command command + ) { + this.name = name; + this.command = command; + } + + /** + * @return Subcommand realization. + */ + public Command subcommand() { + return command; + } + + /** + * @param text Command text. + * @return Command for the text. + */ + public static DiagnosticSubCommand of(String text) { + for (DiagnosticSubCommand cmd : DiagnosticSubCommand.values()) { + if (cmd.name.equalsIgnoreCase(text)) + return cmd; + } + + return null; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java new file mode 100644 index 0000000000000..cd6dc77677d91 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/commandline/diagnostic/PageLocksCommand.java @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.commandline.diagnostic; + +import java.io.File; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.logging.Logger; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.client.GridClient; +import org.apache.ignite.internal.client.GridClientConfiguration; +import org.apache.ignite.internal.commandline.Command; +import org.apache.ignite.internal.commandline.CommandArgIterator; +import org.apache.ignite.internal.commandline.CommandLogger; +import org.apache.ignite.internal.commandline.TaskExecutor; +import org.apache.ignite.internal.commandline.argument.CommandArg; +import org.apache.ignite.internal.commandline.argument.CommandArgUtils; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.visor.diagnostic.Operation; +import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksResult; +import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksTask; +import org.apache.ignite.internal.visor.diagnostic.VisorPageLocksTrackerArgs; + +import static org.apache.ignite.internal.commandline.CommandHandler.UTILITY_NAME; +import static org.apache.ignite.internal.commandline.CommandList.DIAGNOSTIC; +import static org.apache.ignite.internal.commandline.CommandLogger.join; +import static org.apache.ignite.internal.commandline.CommandLogger.optional; +import static org.apache.ignite.internal.commandline.diagnostic.DiagnosticSubCommand.PAGE_LOCKS; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.ALL; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.DUMP_LOG; +import static org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.NODES; +import static 
org.apache.ignite.internal.commandline.diagnostic.PageLocksCommand.PageLocksCommandArg.PATH; +import static org.apache.ignite.internal.processors.diagnostic.DiagnosticProcessor.DEFAULT_TARGET_FOLDER; + +/** + * + */ +public class PageLocksCommand implements Command { + /** */ + private Arguments arguments; + + /** */ + private Logger logger; + + + /** {@inheritDoc} */ + @Override public Object execute(GridClientConfiguration clientCfg, Logger logger) throws Exception { + this.logger = logger; + + Set nodeIds = arguments.nodeIds; + + Map res; + + try (GridClient client = Command.startClient(clientCfg)) { + if (arguments.allNodes) { + client.compute().nodes().forEach(n -> { + nodeIds.add(String.valueOf(n.consistentId())); + nodeIds.add(n.nodeId().toString()); + }); + } + + VisorPageLocksTrackerArgs taskArg = new VisorPageLocksTrackerArgs(arguments.operation, arguments.filePath, nodeIds); + + res = TaskExecutor.executeTask( + client, + VisorPageLocksTask.class, + taskArg, + clientCfg + ); + } + + printResult(res); + + return res; + } + + /** {@inheritDoc} */ + @Override public Arguments arg() { + return arguments; + } + + /** {@inheritDoc} */ + @Override public void parseArguments(CommandArgIterator argIter) { + Operation op = Operation.DUMP_LOG; + + String path = null; + boolean allNodes = false; + Set nodeIds = new HashSet<>(); + + while (argIter.hasNextArg()) { + String nextArg = argIter.nextArg(""); + + PageLocksCommandArg arg = CommandArgUtils.of(nextArg, PageLocksCommandArg.class); + + if (arg == null) + break; + + switch (arg) { + case DUMP: + op = Operation.DUMP_FILE; + + break; + case DUMP_LOG: + op = Operation.DUMP_LOG; + + break; + case ALL: + allNodes = true; + + break; + case NODES: + nodeIds.addAll(argIter.nextStringSet("")); + + break; + case PATH: + path = argIter.nextArg(""); + + break; + default: + throw new IllegalArgumentException( + "Unexpected argument:" + arg + ", supported:" + Arrays.toString(PageLocksCommandArg.values()) + ); + } + } + + 
arguments = new Arguments(op, path, allNodes, nodeIds); + } + + /** {@inheritDoc} */ + @Override public void printUsage(Logger logger) { + logger.info("View pages locks state information on the node or nodes."); + logger.info(join(" ", + UTILITY_NAME, DIAGNOSTIC, PAGE_LOCKS, DUMP, + optional(PATH, "path_to_directory"), + optional(ALL), + optional(CommandLogger.or(NODES, "nodeId1,nodeId2,..")), + optional(CommandLogger.or(NODES, "consistentId1,consistentId2,..")), + "// Save page locks dump to file generated in IGNITE_HOME" + + File.separatorChar + "work" + File.separatorChar + DEFAULT_TARGET_FOLDER + " directory.")); + logger.info(join(" ", + UTILITY_NAME, DIAGNOSTIC, PAGE_LOCKS, DUMP_LOG, + optional(ALL), + optional(CommandLogger.or(NODES, "nodeId1,nodeId2,..")), + optional(CommandLogger.or(NODES, "consistentId1,consistentId2,..")), + "// Print page locks dump to console on the node or nodes.")); + logger.info(""); + } + + /** {@inheritDoc} */ + @Override public String name() { + return PAGE_LOCKS.toString(); + } + + /** + * @param res Result. + */ + private void printResult(Map res) { + res.forEach((n, res0) -> { + logger.info(n.id() + " (" + n.consistentId() + ") " + res0.result()); + }); + } + + /** */ + public static class Arguments { + /** */ + private final Operation operation; + /** */ + private final String filePath; + /** */ + private final boolean allNodes; + /** */ + private final Set nodeIds; + + /** + * @param operation Operation. + * @param filePath File path. + * @param allNodes If {@code True} include all available nodes for command. If {@code False} include only subset. + * @param nodeIds Node ids. 
+ */ + public Arguments( + Operation operation, + String filePath, + boolean allNodes, + Set nodeIds + ) { + this.operation = operation; + this.filePath = filePath; + this.allNodes = allNodes; + this.nodeIds = nodeIds; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(Arguments.class, this); + } + } + + enum PageLocksCommandArg implements CommandArg { + /** */ + DUMP("dump"), + + /** */ + DUMP_LOG("dump_log"), + + /** */ + PATH("--path"), + + /** */ + NODES("--nodes"), + + /** */ + ALL("--all"); + + /** Option name. */ + private final String name; + + /** */ + PageLocksCommandArg(String name) { + this.name = name; + } + + /** {@inheritDoc} */ + @Override public String argName() { + return name; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return name; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java index 7d3644fdef18e..b4e52726d3bb2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageReader.java @@ -27,6 +27,9 @@ import org.apache.ignite.internal.direct.stream.DirectByteBufferStream; import org.apache.ignite.internal.direct.stream.v1.DirectByteBufferStreamImplV1; import org.apache.ignite.internal.direct.stream.v2.DirectByteBufferStreamImplV2; +import org.apache.ignite.internal.direct.stream.v3.DirectByteBufferStreamImplV3; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteOutClosure; import org.apache.ignite.lang.IgniteUuid; @@ -41,8 +44,13 @@ */ public class DirectMessageReader implements MessageReader { /** State. 
*/ + @GridToStringInclude private final DirectMessageState state; + /** Protocol version. */ + @GridToStringInclude + private final byte protoVer; + /** Whether last field was fully read. */ private boolean lastRead; @@ -56,6 +64,8 @@ public DirectMessageReader(final MessageFactory msgFactory, final byte protoVer) return new StateItem(msgFactory, protoVer); } }); + + this.protoVer = protoVer; } /** {@inheritDoc} */ @@ -304,6 +314,21 @@ public DirectMessageReader(final MessageFactory msgFactory, final byte protoVer) return val; } + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion readAffinityTopologyVersion(String name) { + if (protoVer >= 3) { + DirectByteBufferStream stream = state.item().stream; + + AffinityTopologyVersion val = stream.readAffinityTopologyVersion(); + + lastRead = stream.lastFinished(); + + return val; + } + + return readMessage(name); + } + /** {@inheritDoc} */ @Nullable @Override public T readMessage(String name) { DirectByteBufferStream stream = state.item().stream; @@ -409,6 +434,11 @@ public StateItem(MessageFactory msgFactory, byte protoVer) { break; + case 3: + stream = new DirectByteBufferStreamImplV3(msgFactory); + + break; + default: throw new IllegalStateException("Invalid protocol version: " + protoVer); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java index b224d68fbd8f2..ba30538e372a2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/DirectMessageWriter.java @@ -27,6 +27,8 @@ import org.apache.ignite.internal.direct.stream.DirectByteBufferStream; import org.apache.ignite.internal.direct.stream.v1.DirectByteBufferStreamImplV1; import org.apache.ignite.internal.direct.stream.v2.DirectByteBufferStreamImplV2; +import 
org.apache.ignite.internal.direct.stream.v3.DirectByteBufferStreamImplV3; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.lang.IgniteOutClosure; @@ -44,6 +46,10 @@ public class DirectMessageWriter implements MessageWriter { @GridToStringInclude private final DirectMessageState state; + /** Protocol version. */ + @GridToStringInclude + private final byte protoVer; + /** * @param protoVer Protocol version. */ @@ -53,6 +59,8 @@ public DirectMessageWriter(final byte protoVer) { return new StateItem(protoVer); } }); + + this.protoVer = protoVer; } /** {@inheritDoc} */ @@ -272,6 +280,19 @@ public DirectMessageWriter(final byte protoVer) { return stream.lastFinished(); } + /** {@inheritDoc} */ + @Override public boolean writeAffinityTopologyVersion(String name, AffinityTopologyVersion val) { + if (protoVer >= 3) { + DirectByteBufferStream stream = state.item().stream; + + stream.writeAffinityTopologyVersion(val); + + return stream.lastFinished(); + } + + return writeMessage(name, val); + } + /** {@inheritDoc} */ @Override public boolean writeMessage(String name, @Nullable Message msg) { DirectByteBufferStream stream = state.item().stream; @@ -376,6 +397,11 @@ public StateItem(byte protoVer) { break; + case 3: + stream = new DirectByteBufferStreamImplV3(null); + + break; + default: throw new IllegalStateException("Invalid protocol version: " + protoVer); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/DirectByteBufferStream.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/DirectByteBufferStream.java index 204e6b034530b..ae5502eb75f8e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/DirectByteBufferStream.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/DirectByteBufferStream.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.Map; import java.util.UUID; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; @@ -160,6 +161,11 @@ public interface DirectByteBufferStream { */ public void writeIgniteUuid(IgniteUuid val); + /** + * @param val Value. + */ + public void writeAffinityTopologyVersion(AffinityTopologyVersion val); + /** * @param msg Message. * @param writer Writer. @@ -289,6 +295,11 @@ public void writeMap(Map map, MessageCollectionItemType keyType, Me */ public IgniteUuid readIgniteUuid(); + /** + * @return Value. + */ + public AffinityTopologyVersion readAffinityTopologyVersion(); + /** * @param reader Reader. * @return Message. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java index 76cb76243c757..7e845e0cd6ae1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v1/DirectByteBufferStreamImplV1.java @@ -27,6 +27,7 @@ import java.util.NoSuchElementException; import java.util.UUID; import org.apache.ignite.internal.direct.stream.DirectByteBufferStream; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.internal.S; @@ -494,6 +495,11 @@ public DirectByteBufferStreamImplV1(MessageFactory msgFactory) { writeByteArray(val != null ? U.igniteUuidToBytes(val) : null); } + /** {@inheritDoc} */ + @Override public void writeAffinityTopologyVersion(AffinityTopologyVersion val) { + throw new UnsupportedOperationException("Not implemented"); + } + /** {@inheritDoc} */ @Override public void writeMessage(Message msg, MessageWriter writer) { if (msg != null) { @@ -811,6 +817,11 @@ public DirectByteBufferStreamImplV1(MessageFactory msgFactory) { return arr != null ? 
U.bytesToIgniteUuid(arr, 0) : null; } + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion readAffinityTopologyVersion() { + throw new UnsupportedOperationException("Not implemented"); + } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T readMessage(MessageReader reader) { @@ -1212,6 +1223,7 @@ private void write(MessageCollectionItemType type, Object val, MessageWriter wri break; + case AFFINITY_TOPOLOGY_VERSION: case MSG: try { if (val != null) @@ -1298,6 +1310,7 @@ private Object read(MessageCollectionItemType type, MessageReader reader) { case IGNITE_UUID: return readIgniteUuid(); + case AFFINITY_TOPOLOGY_VERSION: case MSG: return readMessage(reader); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v2/DirectByteBufferStreamImplV2.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v2/DirectByteBufferStreamImplV2.java index e338bc0187d46..fd93cfb81f832 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v2/DirectByteBufferStreamImplV2.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v2/DirectByteBufferStreamImplV2.java @@ -29,6 +29,7 @@ import java.util.UUID; import org.apache.ignite.IgniteException; import org.apache.ignite.internal.direct.stream.DirectByteBufferStream; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.internal.S; @@ -298,7 +299,7 @@ public class DirectByteBufferStreamImplV2 implements DirectByteBufferStream { private long uuidLocId; /** */ - private boolean lastFinished; + protected boolean lastFinished; /** * @param msgFactory Message factory. 
@@ -657,6 +658,11 @@ public DirectByteBufferStreamImplV2(MessageFactory msgFactory) { } } + /** {@inheritDoc} */ + @Override public void writeAffinityTopologyVersion(AffinityTopologyVersion val) { + throw new UnsupportedOperationException("Not implemented"); + } + /** {@inheritDoc} */ @Override public void writeMessage(Message msg, MessageWriter writer) { if (msg != null) { @@ -1152,6 +1158,11 @@ private void writeRandomAccessList(List list, MessageCollectionItemType i return val; } + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion readAffinityTopologyVersion() { + throw new UnsupportedOperationException("Not implemented"); + } + /** {@inheritDoc} */ @SuppressWarnings("unchecked") @Override public T readMessage(MessageReader reader) { @@ -1587,7 +1598,7 @@ T readArrayLE(ArrayCreator creator, int typeSize, int lenShift, long off) * @param val Value. * @param writer Writer. */ - private void write(MessageCollectionItemType type, Object val, MessageWriter writer) { + protected void write(MessageCollectionItemType type, Object val, MessageWriter writer) { switch (type) { case BYTE: writeByte((Byte)val); @@ -1689,6 +1700,7 @@ private void write(MessageCollectionItemType type, Object val, MessageWriter wri break; + case AFFINITY_TOPOLOGY_VERSION: case MSG: try { if (val != null) @@ -1713,7 +1725,7 @@ private void write(MessageCollectionItemType type, Object val, MessageWriter wri * @param reader Reader. * @return Value. 
*/ - private Object read(MessageCollectionItemType type, MessageReader reader) { + protected Object read(MessageCollectionItemType type, MessageReader reader) { switch (type) { case BYTE: return readByte(); @@ -1775,6 +1787,7 @@ private Object read(MessageCollectionItemType type, MessageReader reader) { case IGNITE_UUID: return readIgniteUuid(); + case AFFINITY_TOPOLOGY_VERSION: case MSG: return readMessage(reader); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v3/DirectByteBufferStreamImplV3.java b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v3/DirectByteBufferStreamImplV3.java new file mode 100644 index 0000000000000..89043ebcbc0c5 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/direct/stream/v3/DirectByteBufferStreamImplV3.java @@ -0,0 +1,298 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.direct.stream.v3; + +import java.util.BitSet; +import java.util.UUID; +import org.apache.ignite.internal.direct.stream.v2.DirectByteBufferStreamImplV2; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.lang.IgniteUuid; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageCollectionItemType; +import org.apache.ignite.plugin.extensions.communication.MessageFactory; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +/** + * + */ +public class DirectByteBufferStreamImplV3 extends DirectByteBufferStreamImplV2 { + /** */ + private byte topVerState; + + /** */ + private long topVerMajor; + + /** */ + private int topVerMinor; + + /** + * @param msgFactory Message factory. + */ + public DirectByteBufferStreamImplV3(MessageFactory msgFactory) { + super(msgFactory); + } + + /** {@inheritDoc} */ + @Override public void writeAffinityTopologyVersion(AffinityTopologyVersion val) { + if (val != null) { + switch (topVerState) { + case 0: + writeInt(val.minorTopologyVersion()); + + if (!lastFinished) + return; + + topVerState++; + + case 1: + writeLong(val.topologyVersion()); + + if (!lastFinished) + return; + + topVerState = 0; + } + } + else + writeInt(-1); + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion readAffinityTopologyVersion() { + switch (topVerState) { + case 0: + topVerMinor = readInt(); + + if (!lastFinished || topVerMinor == -1) + return null; + + topVerState++; + + case 1: + topVerMajor = readLong(); + + if (!lastFinished) + return null; + + topVerState = 0; + } + + return new AffinityTopologyVersion(topVerMajor, topVerMinor); + } + + /** {@inheritDoc} */ + @Override protected void write(MessageCollectionItemType type, Object val, MessageWriter writer) { + switch (type) { + 
case BYTE: + writeByte((Byte)val); + + break; + + case SHORT: + writeShort((Short)val); + + break; + + case INT: + writeInt((Integer)val); + + break; + + case LONG: + writeLong((Long)val); + + break; + + case FLOAT: + writeFloat((Float)val); + + break; + + case DOUBLE: + writeDouble((Double)val); + + break; + + case CHAR: + writeChar((Character)val); + + break; + + case BOOLEAN: + writeBoolean((Boolean)val); + + break; + + case BYTE_ARR: + writeByteArray((byte[])val); + + break; + + case SHORT_ARR: + writeShortArray((short[])val); + + break; + + case INT_ARR: + writeIntArray((int[])val); + + break; + + case LONG_ARR: + writeLongArray((long[])val); + + break; + + case FLOAT_ARR: + writeFloatArray((float[])val); + + break; + + case DOUBLE_ARR: + writeDoubleArray((double[])val); + + break; + + case CHAR_ARR: + writeCharArray((char[])val); + + break; + + case BOOLEAN_ARR: + writeBooleanArray((boolean[])val); + + break; + + case STRING: + writeString((String)val); + + break; + + case BIT_SET: + writeBitSet((BitSet)val); + + break; + + case UUID: + writeUuid((UUID)val); + + break; + + case IGNITE_UUID: + writeIgniteUuid((IgniteUuid)val); + + break; + + case AFFINITY_TOPOLOGY_VERSION: + writeAffinityTopologyVersion((AffinityTopologyVersion)val); + + break; + case MSG: + try { + if (val != null) + writer.beforeInnerMessageWrite(); + + writeMessage((Message)val, writer); + } + finally { + if (val != null) + writer.afterInnerMessageWrite(lastFinished); + } + + break; + + default: + throw new IllegalArgumentException("Unknown type: " + type); + } + } + + /** {@inheritDoc} */ + @Override protected Object read(MessageCollectionItemType type, MessageReader reader) { + switch (type) { + case BYTE: + return readByte(); + + case SHORT: + return readShort(); + + case INT: + return readInt(); + + case LONG: + return readLong(); + + case FLOAT: + return readFloat(); + + case DOUBLE: + return readDouble(); + + case CHAR: + return readChar(); + + case BOOLEAN: + return readBoolean(); + 
+ case BYTE_ARR: + return readByteArray(); + + case SHORT_ARR: + return readShortArray(); + + case INT_ARR: + return readIntArray(); + + case LONG_ARR: + return readLongArray(); + + case FLOAT_ARR: + return readFloatArray(); + + case DOUBLE_ARR: + return readDoubleArray(); + + case CHAR_ARR: + return readCharArray(); + + case BOOLEAN_ARR: + return readBooleanArray(); + + case STRING: + return readString(); + + case BIT_SET: + return readBitSet(); + + case UUID: + return readUuid(); + + case IGNITE_UUID: + return readIgniteUuid(); + + case AFFINITY_TOPOLOGY_VERSION: + return readAffinityTopologyVersion(); + + case MSG: + return readMessage(reader); + + default: + throw new IllegalArgumentException("Unknown type: " + type); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java new file mode 100644 index 0000000000000..3441742dfabd6 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObject.java @@ -0,0 +1,130 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.dto; + +import java.io.Externalizable; +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.ArrayList; +import java.util.Collection; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Set; +import org.jetbrains.annotations.Nullable; + +/** + * Base class for data transfer objects. + */ +public abstract class IgniteDataTransferObject implements Externalizable { + /** */ + private static final long serialVersionUID = 0L; + + /** Magic number to detect correct transfer objects. */ + private static final int MAGIC = 0x42BEEF00; + + /** Version 1. */ + protected static final byte V1 = 1; + + /** Version 2. */ + protected static final byte V2 = 2; + + /** Version 3. */ + protected static final byte V3 = 3; + + /** Version 4. */ + protected static final byte V4 = 4; + + /** Version 5. */ + protected static final byte V5 = 5; + + /** + * @param col Source collection. + * @param Collection type. + * @return List based on passed collection. + */ + @Nullable protected static List toList(Collection col) { + if (col != null) + return new ArrayList<>(col); + + return null; + } + + /** + * @param col Source collection. + * @param Collection type. + * @return List based on passed collection. + */ + @Nullable protected static Set toSet(Collection col) { + if (col != null) + return new LinkedHashSet<>(col); + + return null; + } + + /** + * @return Transfer object version. + */ + public byte getProtocolVersion() { + return V1; + } + + /** + * Save object's specific data content. + * + * @param out Output object to write data content. + * @throws IOException If I/O errors occur. 
+ */ + protected abstract void writeExternalData(ObjectOutput out) throws IOException; + + /** {@inheritDoc} */ + @Override public void writeExternal(ObjectOutput out) throws IOException { + int hdr = MAGIC + getProtocolVersion(); + + out.writeInt(hdr); + + try (IgniteDataTransferObjectOutput dtout = new IgniteDataTransferObjectOutput(out)) { + writeExternalData(dtout); + } + } + + /** + * Load object's specific data content. + * + * @param protoVer Input object version. + * @param in Input object to load data content. + * @throws IOException If I/O errors occur. + * @throws ClassNotFoundException If the class for an object being restored cannot be found. + */ + protected abstract void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException; + + /** {@inheritDoc} */ + @Override public void readExternal(ObjectInput in) throws IOException, ClassNotFoundException { + int hdr = in.readInt(); + + if ((hdr & MAGIC) != MAGIC) + throw new IOException("Unexpected IgniteDataTransferObject header " + + "[actual=" + Integer.toHexString(hdr) + ", expected=" + Integer.toHexString(MAGIC) + "]"); + + byte ver = (byte)(hdr & 0xFF); + + try (IgniteDataTransferObjectInput dtin = new IgniteDataTransferObjectInput(in)) { + readExternalData(ver, dtin); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectInput.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectInput.java new file mode 100644 index 0000000000000..c12287520656a --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectInput.java @@ -0,0 +1,156 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.dto; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectInputStream; +import org.apache.ignite.internal.util.io.GridByteArrayInputStream; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; + +/** + * Wrapper for object input. + */ +public class IgniteDataTransferObjectInput implements ObjectInput { + /** */ + private final ObjectInputStream ois; + + /** + * @param in Target input. + * @throws IOException If an I/O error occurs. 
+ */ + public IgniteDataTransferObjectInput(ObjectInput in) throws IOException { + byte[] buf = U.readByteArray(in); + + /* */ + GridByteArrayInputStream bis = new GridByteArrayInputStream(buf); + ois = new ObjectInputStream(bis); + } + + + /** {@inheritDoc} */ + @Override public Object readObject() throws ClassNotFoundException, IOException { + return ois.readObject(); + } + + /** {@inheritDoc} */ + @Override public int read() throws IOException { + return ois.read(); + } + + /** {@inheritDoc} */ + @Override public int read(byte[] b) throws IOException { + return ois.read(b); + } + + /** {@inheritDoc} */ + @Override public int read(byte[] b, int off, int len) throws IOException { + return ois.read(b, off, len); + } + + /** {@inheritDoc} */ + @Override public long skip(long n) throws IOException { + return ois.skip(n); + } + + /** {@inheritDoc} */ + @Override public int available() throws IOException { + return ois.available(); + } + + /** {@inheritDoc} */ + @Override public void close() throws IOException { + ois.close(); + } + + /** {@inheritDoc} */ + @Override public void readFully(@NotNull byte[] b) throws IOException { + ois.readFully(b); + } + + /** {@inheritDoc} */ + @Override public void readFully(@NotNull byte[] b, int off, int len) throws IOException { + ois.readFully(b, off, len); + } + + /** {@inheritDoc} */ + @Override public int skipBytes(int n) throws IOException { + return ois.skipBytes(n); + } + + /** {@inheritDoc} */ + @Override public boolean readBoolean() throws IOException { + return ois.readBoolean(); + } + + /** {@inheritDoc} */ + @Override public byte readByte() throws IOException { + return ois.readByte(); + } + + /** {@inheritDoc} */ + @Override public int readUnsignedByte() throws IOException { + return ois.readUnsignedByte(); + } + + /** {@inheritDoc} */ + @Override public short readShort() throws IOException { + return ois.readShort(); + } + + /** {@inheritDoc} */ + @Override public int readUnsignedShort() throws IOException { + return 
ois.readUnsignedShort(); + } + + /** {@inheritDoc} */ + @Override public char readChar() throws IOException { + return ois.readChar(); + } + + /** {@inheritDoc} */ + @Override public int readInt() throws IOException { + return ois.readInt(); + } + + /** {@inheritDoc} */ + @Override public long readLong() throws IOException { + return ois.readLong(); + } + + /** {@inheritDoc} */ + @Override public float readFloat() throws IOException { + return ois.readFloat(); + } + + /** {@inheritDoc} */ + @Override public double readDouble() throws IOException { + return ois.readDouble(); + } + + /** {@inheritDoc} */ + @Override public String readLine() throws IOException { + return ois.readLine(); + } + + /** {@inheritDoc} */ + @NotNull @Override public String readUTF() throws IOException { + return ois.readUTF(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectOutput.java b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectOutput.java new file mode 100644 index 0000000000000..db4933cea29c0 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/dto/IgniteDataTransferObjectOutput.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.dto; + +import java.io.IOException; +import java.io.ObjectOutput; +import java.io.ObjectOutputStream; +import org.apache.ignite.internal.util.io.GridByteArrayOutputStream; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.NotNull; + +/** + * Wrapper for object output. + */ +public class IgniteDataTransferObjectOutput implements ObjectOutput { + /** */ + private final ObjectOutput out; + + /** */ + private final GridByteArrayOutputStream bos; + + /** */ + private final ObjectOutputStream oos; + + /** + * Constructor. + * + * @param out Target stream. + * @throws IOException If an I/O error occurs. + */ + public IgniteDataTransferObjectOutput(ObjectOutput out) throws IOException { + this.out = out; + + bos = new GridByteArrayOutputStream(); + oos = new ObjectOutputStream(bos); + } + + /** {@inheritDoc} */ + @Override public void writeObject(Object obj) throws IOException { + oos.writeObject(obj); + } + + /** {@inheritDoc} */ + @Override public void write(int b) throws IOException { + oos.write(b); + } + + /** {@inheritDoc} */ + @Override public void write(byte[] b) throws IOException { + oos.write(b); + } + + /** {@inheritDoc} */ + @Override public void write(byte[] b, int off, int len) throws IOException { + oos.write(b, off, len); + } + + /** {@inheritDoc} */ + @Override public void writeBoolean(boolean v) throws IOException { + oos.writeBoolean(v); + } + + /** {@inheritDoc} */ + @Override public void writeByte(int v) throws IOException { + oos.writeByte(v); + } + + /** {@inheritDoc} */ + @Override public void writeShort(int v) throws IOException { + oos.writeShort(v); + } + + /** {@inheritDoc} */ + @Override public void writeChar(int v) throws IOException { + oos.writeChar(v); + } + + /** {@inheritDoc} */ + @Override public void writeInt(int v) throws IOException { + 
oos.writeInt(v); + } + + /** {@inheritDoc} */ + @Override public void writeLong(long v) throws IOException { + oos.writeLong(v); + } + + /** {@inheritDoc} */ + @Override public void writeFloat(float v) throws IOException { + oos.writeFloat(v); + } + + /** {@inheritDoc} */ + @Override public void writeDouble(double v) throws IOException { + oos.writeDouble(v); + } + + /** {@inheritDoc} */ + @Override public void writeBytes(@NotNull String s) throws IOException { + oos.writeBytes(s); + } + + /** {@inheritDoc} */ + @Override public void writeChars(@NotNull String s) throws IOException { + oos.writeChars(s); + } + + /** {@inheritDoc} */ + @Override public void writeUTF(@NotNull String s) throws IOException { + oos.writeUTF(s); + } + + /** {@inheritDoc} */ + @Override public void flush() throws IOException { + oos.flush(); + } + + /** {@inheritDoc} */ + @Override public void close() throws IOException { + oos.flush(); + + U.writeByteArray(out, bos.internalArray(), bos.size()); + + oos.close(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/executor/GridExecutorService.java b/modules/core/src/main/java/org/apache/ignite/internal/executor/GridExecutorService.java index bd34599586410..868640ffb18bf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/executor/GridExecutorService.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/executor/GridExecutorService.java @@ -208,15 +208,13 @@ protected Object readResolve() throws ObjectStreamException { /** {@inheritDoc} */ @Override public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - long now = U.currentTimeMillis(); + long startNanos = System.nanoTime(); timeout = TimeUnit.MILLISECONDS.convert(timeout, unit); - long end = timeout == 0 ? Long.MAX_VALUE : timeout + now; - // Prevent overflow. 
- if (end < 0) - end = Long.MAX_VALUE; + if (timeout <= 0) + timeout = Long.MAX_VALUE; List> locTasks; @@ -227,11 +225,13 @@ protected Object readResolve() throws ObjectStreamException { Iterator> iter = locTasks.iterator(); - while (iter.hasNext() && now < end) { + long passedMillis = 0L; + + while (iter.hasNext() && passedMillis < timeout) { IgniteInternalFuture fut = iter.next(); try { - fut.get(end - now); + fut.get(timeout - passedMillis); } catch (ComputeTaskTimeoutCheckedException e) { U.error(log, "Failed to get task result: " + fut, e); @@ -245,7 +245,7 @@ protected Object readResolve() throws ObjectStreamException { throw new InterruptedException("Got interrupted while waiting for task completion."); } - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } return true; @@ -343,20 +343,20 @@ protected Object readResolve() throws ObjectStreamException { A.ensure(timeout >= 0, "timeout >= 0"); A.notNull(unit, "unit != null"); - long now = U.currentTimeMillis(); + long startNanos = System.nanoTime(); timeout = TimeUnit.MILLISECONDS.convert(timeout, unit); - long end = timeout == 0 ? Long.MAX_VALUE : timeout + now; - // Prevent overflow. - if (end < 0) - end = Long.MAX_VALUE; + if (timeout <= 0) + timeout = Long.MAX_VALUE; checkShutdown(); Collection> taskFuts = new ArrayList<>(); + long passedMillis = 0L; + for (Callable task : tasks) { // Execute task without predefined timeout. // GridFuture.cancel() will be called if timeout elapsed. 
@@ -373,15 +373,15 @@ protected Object readResolve() throws ObjectStreamException { taskFuts.add(fut); - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } boolean isInterrupted = false; for (IgniteInternalFuture fut : taskFuts) { - if (!isInterrupted && now < end) { + if (!isInterrupted && passedMillis < timeout) { try { - fut.get(end - now); + fut.get(timeout - passedMillis); } catch (ComputeTaskTimeoutCheckedException ignore) { if (log.isDebugEnabled()) @@ -400,7 +400,7 @@ protected Object readResolve() throws ObjectStreamException { } } - now = U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } // Throw exception if any task wait was interrupted. @@ -480,15 +480,13 @@ private void cancelFuture(IgniteInternalFuture fut) { A.ensure(timeout >= 0, "timeout >= 0"); A.notNull(unit, "unit != null"); - long now = System.currentTimeMillis(); + long startNanos = System.nanoTime(); timeout = TimeUnit.MILLISECONDS.convert(timeout, unit); - long end = timeout == 0 ? Long.MAX_VALUE : timeout + now; - // Prevent overflow. 
- if (end < 0) - end = Long.MAX_VALUE; + if (timeout <= 0) + timeout = Long.MAX_VALUE; checkShutdown(); @@ -518,13 +516,13 @@ private void cancelFuture(IgniteInternalFuture fut) { int errCnt = 0; for (IgniteInternalFuture fut : taskFuts) { - now = U.currentTimeMillis(); + long passedMillis = U.millisSinceNanos(startNanos); boolean cancel = false; - if (!isInterrupted && !isResRcvd && now < end) { + if (!isInterrupted && !isResRcvd && passedMillis < timeout) { try { - res = fut.get(end - now); + res = fut.get(timeout - passedMillis); isResRcvd = true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnection.java index 064a6f6c0ed2e..3ed53a5674115 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnection.java @@ -65,8 +65,12 @@ */ @Deprecated public class JdbcConnection implements Connection { - /** Validation task name. */ - private static final String VALID_TASK_NAME = + /** Connection validation task name. */ + private static final String CONNECTION_VALID_TASK_NAME = + "org.apache.ignite.internal.jdbc.JdbcConnectionValidationTask"; + + /** Cache validation task name. */ + private static final String CACHE_VALID_TASK_NAME = "org.apache.ignite.internal.processors.cache.query.jdbc.GridCacheQueryJdbcValidationTask"; /** Ignite client. */ @@ -87,6 +91,9 @@ public class JdbcConnection implements Connection { /** Timeout. */ private int timeout; + /** Whether using security. */ + private boolean usingSecurity; + /** * Creates new connection. 
* @@ -115,10 +122,14 @@ public JdbcConnection(String url, Properties props) throws SQLException { String passwd = props.getProperty("password"); if (!F.isEmpty(user)) { + usingSecurity = true; + SecurityCredentials creds = new SecurityCredentials(user, passwd); cfg.setSecurityCredentialsProvider(new SecurityCredentialsBasicProvider(creds)); } + else + usingSecurity = false; // Disable all fetching and caching for metadata. cfg.setEnableMetricsCache(false); @@ -453,7 +464,9 @@ public JdbcConnection(String url, Properties props) throws SQLException { throw new SQLException("Invalid timeout: " + timeout); try { - return client.compute().executeAsync(VALID_TASK_NAME, cacheName).get(timeout, SECONDS); + return client.compute() + .executeAsync(usingSecurity ? CONNECTION_VALID_TASK_NAME : CACHE_VALID_TASK_NAME, cacheName) + .get(timeout, SECONDS); } catch (GridClientDisconnectedException | GridClientFutureTimeoutException e) { throw new SQLException("Failed to establish connection.", e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java new file mode 100644 index 0000000000000..d35bfb86c84f1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/JdbcConnectionValidationTask.java @@ -0,0 +1,54 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.jdbc; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import org.apache.ignite.IgniteException; +import org.apache.ignite.compute.ComputeJob; +import org.apache.ignite.compute.ComputeJobAdapter; +import org.apache.ignite.compute.ComputeJobResult; +import org.apache.ignite.compute.ComputeTaskSplitAdapter; +import org.apache.ignite.internal.util.typedef.F; +import org.jetbrains.annotations.Nullable; + +/** + * This task is used for JDBCConnection validation. + * + * @deprecated Using Ignite client node based JDBC driver is preferable. + * See documentation of {@link org.apache.ignite.IgniteJdbcDriver} for details. 
+ */ +@Deprecated +public class JdbcConnectionValidationTask extends ComputeTaskSplitAdapter { + /** */ + private static final long serialVersionUID = 0L; + + /** {@inheritDoc} */ + @Override protected Collection split(int gridSize, Object arg) throws IgniteException { + return Collections.singletonList(new ComputeJobAdapter() { + @Override public Object execute() throws IgniteException { + return true; + } + }); + } + + /** {@inheritDoc} */ + @Nullable @Override public Boolean reduce(List results) throws IgniteException { + return F.first(results).getData(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java index 5d770054f46d4..86dc2980ecd9a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/ConnectionPropertiesImpl.java @@ -23,8 +23,6 @@ import java.util.Arrays; import java.util.Properties; import java.util.StringTokenizer; -import javax.naming.RefAddr; -import javax.naming.Reference; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.ClientConnectorConfiguration; import org.apache.ignite.internal.processors.odbc.SqlStateCode; @@ -44,6 +42,9 @@ public class ConnectionPropertiesImpl implements ConnectionProperties, Serializa /** Default socket buffer size. */ private static final int DFLT_SOCK_BUFFER_SIZE = 64 * 1024; + /** Property: schema. */ + private static final String PROP_SCHEMA = "schema"; + /** Connection URL. */ private String url; @@ -51,7 +52,7 @@ public class ConnectionPropertiesImpl implements ConnectionProperties, Serializa private HostAndPortRange [] addrs; /** Schema name. Hidden property. Is used to set default schema name part of the URL. 
*/ - private StringProperty schema = new StringProperty("schema", + private StringProperty schema = new StringProperty(PROP_SCHEMA, "Schema name of the connection", "PUBLIC", null, false, null); /** Distributed joins property. */ @@ -487,21 +488,113 @@ private void parseUrl(String url, Properties props) throws SQLException { String nakedUrl = url.substring(JdbcThinUtils.URL_PREFIX.length()).trim(); - int pathPartEndPos = nakedUrl.indexOf('?'); + parseUrl0(nakedUrl, props); + } + + /** + * Parse naked URL (i.e. without {@link JdbcThinUtils#URL_PREFIX}). + * + * @param url Naked URL. + * @param props Properties. + * @throws SQLException If failed. + */ + private void parseUrl0(String url, Properties props) throws SQLException { + // Determine mode - semicolon or ampersand. + int semicolonPos = url.indexOf(";"); + int slashPos = url.indexOf("/"); + int queryPos = url.indexOf("?"); + + boolean semicolonMode; + + if (semicolonPos == -1 && slashPos == -1 && queryPos == -1) + // No special char -> any mode could be used, choose semicolon for simplicity. + semicolonMode = true; + else { + if (semicolonPos != -1) { + // Use semicolon mode if it appears earlier than slash or query. + semicolonMode = + (slashPos == -1 || semicolonPos < slashPos) && (queryPos == -1 || semicolonPos < queryPos); + } + else + // Semicolon is not found. + semicolonMode = false; + } + + if (semicolonMode) + parseUrlWithSemicolon(url, props); + else + parseUrlWithQuery(url, props); + } + + /** + * Parse URL in semicolon mode. + * + * @param url Naked URL + * @param props Properties. + * @throws SQLException If failed. 
+ */ + private void parseUrlWithSemicolon(String url, Properties props) throws SQLException { + int pathPartEndPos = url.indexOf(';'); if (pathPartEndPos == -1) - pathPartEndPos = nakedUrl.length(); + pathPartEndPos = url.length(); - String pathPart = nakedUrl.substring(0, pathPartEndPos); + String pathPart = url.substring(0, pathPartEndPos); String paramPart = null; - if (pathPartEndPos > 0 && pathPartEndPos < nakedUrl.length()) - paramPart = nakedUrl.substring(pathPartEndPos + 1, nakedUrl.length()); + if (pathPartEndPos > 0 && pathPartEndPos < url.length()) + paramPart = url.substring(pathPartEndPos + 1, url.length()); + + parseEndpoints(pathPart); + + if (!F.isEmpty(paramPart)) + parseParameters(paramPart, props, ";"); + } + + /** + * Parse URL in query mode. + * + * @param url Naked URL + * @param props Properties. + * @throws SQLException If failed. + */ + private void parseUrlWithQuery(String url, Properties props) throws SQLException { + int pathPartEndPos = url.indexOf('?'); + + if (pathPartEndPos == -1) + pathPartEndPos = url.length(); + + String pathPart = url.substring(0, pathPartEndPos); + + String paramPart = null; + + if (pathPartEndPos > 0 && pathPartEndPos < url.length()) + paramPart = url.substring(pathPartEndPos + 1, url.length()); String[] pathParts = pathPart.split("/"); - String [] endpoints = pathParts[0].split(","); + parseEndpoints(pathParts[0]); + + if (pathParts.length > 2) { + throw new SQLException("Invalid URL format (only schema name is allowed in URL path parameter " + + "'host:port[/schemaName]'): " + this.url, SqlStateCode.CLIENT_CONNECTION_FAILED); + } + + setSchema(pathParts.length == 2 ? pathParts[1] : null); + + if (!F.isEmpty(paramPart)) + parseParameters(paramPart, props, "&"); + } + + /** + * Parse endpoints. + * + * @param endpointStr Endpoint string. + * @throws SQLException If failed. 
+ */ + private void parseEndpoints(String endpointStr) throws SQLException { + String [] endpoints = endpointStr.split(","); if (endpoints.length > 0) addrs = new HostAndPortRange[endpoints.length]; @@ -519,16 +612,6 @@ private void parseUrl(String url, Properties props) throws SQLException { if (F.isEmpty(addrs) || F.isEmpty(addrs[0].host())) throw new SQLException("Host name is empty", SqlStateCode.CLIENT_CONNECTION_FAILED); - - if (pathParts.length > 2) { - throw new SQLException("Invalid URL format (only schema name is allowed in URL path parameter " + - "'host:port[/schemaName]'): " + url, SqlStateCode.CLIENT_CONNECTION_FAILED); - } - - setSchema(pathParts.length == 2 ? pathParts[1] : null); - - if (!F.isEmpty(paramPart)) - parseParameters(paramPart, props); } /** @@ -536,10 +619,11 @@ private void parseUrl(String url, Properties props) throws SQLException { * * @param paramStr Parameters string. * @param props Properties. + * @param delimChar Delimiter character. * @throws SQLException If failed. */ - private void parseParameters(String paramStr, Properties props) throws SQLException { - StringTokenizer st = new StringTokenizer(paramStr, "&"); + private void parseParameters(String paramStr, Properties props, String delimChar) throws SQLException { + StringTokenizer st = new StringTokenizer(paramStr, delimChar); boolean insideBrace = false; @@ -553,8 +637,8 @@ private void parseParameters(String paramStr, Properties props) throws SQLExcept int eqSymPos = token.indexOf('='); if (eqSymPos < 0) { - throw new SQLException("Invalid parameter format " + - "(URL properties format: key0=value0&key1=value1&... etc. 
pair: " + token); + throw new SQLException("Invalid parameter format (should be \"key1=val1" + delimChar + + "key2=val2" + delimChar + "...\"): " + token); } if (eqSymPos == token.length()) @@ -570,7 +654,7 @@ private void parseParameters(String paramStr, Properties props) throws SQLExcept } } else - val += "&" + token; + val += delimChar + token; if (val.endsWith("}")) { insideBrace = false; @@ -587,22 +671,24 @@ private void parseParameters(String paramStr, Properties props) throws SQLExcept if (key.isEmpty() || val.isEmpty()) throw new SQLException("Invalid parameter format (key and value cannot be empty): " + token); - props.setProperty(PROP_PREFIX + key, val); + if (PROP_SCHEMA.equalsIgnoreCase(key)) + setSchema(val); + else + props.setProperty(PROP_PREFIX + key, val); } } } - /** * @return Driver's properties info array. */ public DriverPropertyInfo[] getDriverPropertyInfo() { - DriverPropertyInfo[] dpis = new DriverPropertyInfo[propsArray.length]; + DriverPropertyInfo[] infos = new DriverPropertyInfo[propsArray.length]; for (int i = 0; i < propsArray.length; ++i) - dpis[i] = propsArray[i].getDriverPropertyInfo(); + infos[i] = propsArray[i].getDriverPropertyInfo(); - return dpis; + return infos; } /** @@ -740,23 +826,6 @@ protected void checkChoices(String strVal) throws SQLException { } } - /** - * @param ref Reference object. - * @throws SQLException On error. - */ - void init(Reference ref) throws SQLException { - RefAddr refAddr = ref.get(name); - - if (refAddr != null) { - String str = (String) refAddr.getContent(); - - if (validator != null) - validator.validate(str); - - init(str); - } - } - /** * @param str String representation of the * @throws SQLException on error. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java index 6bf768ba917fb..0a2e4b4cbee44 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinConnection.java @@ -35,17 +35,22 @@ import java.sql.Statement; import java.sql.Struct; import java.util.ArrayList; +import java.util.Collections; +import java.util.IdentityHashMap; import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.Set; import java.util.concurrent.Executor; +import java.util.concurrent.Semaphore; import java.util.logging.Level; import java.util.logging.Logger; +import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode; import org.apache.ignite.internal.processors.odbc.ClientListenerResponse; import org.apache.ignite.internal.processors.odbc.SqlStateCode; -import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteRequest; -import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteResult; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcOrderedBatchExecuteRequest; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcOrderedBatchExecuteResult; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryExecuteRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcRequest; @@ -55,6 +60,7 @@ import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.sql.command.SqlCommand; import org.apache.ignite.internal.sql.command.SqlSetStreamingCommand; +import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.typedef.F; import 
org.apache.ignite.lang.IgniteProductVersion; @@ -72,6 +78,9 @@ public class JdbcThinConnection implements Connection { /** Logger. */ private static final Logger LOG = Logger.getLogger(JdbcThinConnection.class.getName()); + /** Statements modification mutex. */ + final private Object stmtsMux = new Object(); + /** Schema name. */ private String schema; @@ -88,7 +97,7 @@ public class JdbcThinConnection implements Connection { private boolean readOnly; /** Streaming flag. */ - private volatile boolean stream; + private volatile StreamState streamState; /** Current transaction holdability. */ private int holdability; @@ -105,20 +114,11 @@ public class JdbcThinConnection implements Connection { /** Connection properties. */ private ConnectionProperties connProps; - /** Batch size for streaming. */ - private int streamBatchSize; - - /** Batch for streaming. */ - private List streamBatch; - - /** Last added query to recognize batches. */ - private String lastStreamQry; - /** Connected. */ private boolean connected; /** Tracked statements to close on disconnect. */ - private ArrayList stmts = new ArrayList<>(); + private final Set stmts = Collections.newSetFromMap(new IdentityHashMap<>()); /** * Creates new connection. @@ -148,6 +148,8 @@ private synchronized void ensureConnected() throws SQLException { if (connected) return; + assert !closed; + cliIo.start(); connected = true; @@ -169,7 +171,7 @@ private synchronized void ensureConnected() throws SQLException { * @return Whether this connection is streamed or not. */ boolean isStream() { - return stream; + return streamState != null; } /** @@ -179,24 +181,28 @@ boolean isStream() { */ void executeNative(String sql, SqlCommand cmd) throws SQLException { if (cmd instanceof SqlSetStreamingCommand) { - // If streaming is already on, we have to disable it first. - if (stream) { - // We have to send request regardless of actual batch size. 
- executeBatch(true); + SqlSetStreamingCommand cmd0 = (SqlSetStreamingCommand)cmd; + + // If streaming is already on, we have to close it first. + if (streamState != null) { + streamState.close(); - stream = false; + streamState = null; } boolean newVal = ((SqlSetStreamingCommand)cmd).isTurnOn(); // Actual ON, if needed. if (newVal) { + if (!cmd0.isOrdered() && !cliIo.isUnorderedStreamSupported()) { + throw new SQLException("Streaming without order doesn't supported by server [remoteNodeVer=" + + cliIo.igniteVersion() + ']', SqlStateCode.INTERNAL_ERROR); + } + sendRequest(new JdbcQueryExecuteRequest(JdbcStatementType.ANY_STATEMENT_TYPE, schema, 1, 1, sql, null)); - streamBatchSize = ((SqlSetStreamingCommand)cmd).batchSize(); - - stream = true; + streamState = new StreamState((SqlSetStreamingCommand)cmd); } } else @@ -211,39 +217,9 @@ void executeNative(String sql, SqlCommand cmd) throws SQLException { * @throws SQLException On error. */ void addBatch(String sql, List args) throws SQLException { - boolean newQry = (args == null || !F.eq(lastStreamQry, sql)); + assert isStream(); - // Providing null as SQL here allows for recognizing subbatches on server and handling them more efficiently. - JdbcQuery q = new JdbcQuery(newQry ? sql : null, args != null ? args.toArray() : null); - - if (streamBatch == null) - streamBatch = new ArrayList<>(streamBatchSize); - - streamBatch.add(q); - - // Null args means "addBatch(String)" was called on non-prepared Statement, - // we don't want to remember its query string. - lastStreamQry = (args != null ? sql : null); - - if (streamBatch.size() == streamBatchSize) - executeBatch(false); - } - - /** - * @param lastBatch Whether open data streamers must be flushed and closed after this batch. - * @throws SQLException if failed. 
- */ - private void executeBatch(boolean lastBatch) throws SQLException { - JdbcBatchExecuteResult res = sendRequest(new JdbcBatchExecuteRequest(schema, streamBatch, lastBatch)); - - streamBatch = null; - - lastStreamQry = null; - - if (res.errorCode() != ClientListenerResponse.STATUS_SUCCESS) { - throw new BatchUpdateException(res.errorMessage(), IgniteQueryErrorCode.codeToSqlState(res.errorCode()), - res.errorCode(), res.updateCounts()); - } + streamState.addBatch(sql, args); } /** {@inheritDoc} */ @@ -268,7 +244,9 @@ private void executeBatch(boolean lastBatch) throws SQLException { if (timeout > 0) stmt.timeout(timeout); - stmts.add(stmt); + synchronized (stmtsMux) { + stmts.add(stmt); + } return stmt; } @@ -299,7 +277,9 @@ private void executeBatch(boolean lastBatch) throws SQLException { if (timeout > 0) stmt.timeout(timeout); - stmts.add(stmt); + synchronized (stmtsMux) { + stmts.add(stmt); + } return stmt; } @@ -392,13 +372,14 @@ private void checkCursorOptions(int resSetType, int resSetConcurrency, if (isClosed()) return; - if (!F.isEmpty(streamBatch)) { - try { - executeBatch(true); - } - catch (SQLException e) { - LOG.log(Level.WARNING, "Exception during batch send on streamed connection close", e); - } + if (streamState != null) { + streamState.close(); + + streamState = null; + } + + synchronized (stmtsMux) { + stmts.clear(); } closed = true; @@ -790,6 +771,28 @@ R sendRequest(JdbcRequest req) throws SQLException { } } + /** + * Send request for execution via {@link #cliIo}. Response is waited at the separate thread + * (see {@link StreamState#asyncRespReaderThread}). + * @param req Request. + * @throws SQLException On any error. 
+ */ + private void sendRequestNotWaitResponse(JdbcOrderedBatchExecuteRequest req) throws SQLException { + ensureConnected(); + + try { + cliIo.sendBatchRequestNoWaitResponse(req); + } + catch (SQLException e) { + throw e; + } + catch (Exception e) { + onDisconnect(); + + throw new SQLException("Failed to communicate with Ignite cluster.", SqlStateCode.CONNECTION_FAILURE, e); + } + } + /** * @return Connection URL. */ @@ -808,14 +811,18 @@ private void onDisconnect() { connected = false; - streamBatch = null; + if (streamState != null) { + streamState.close0(); - lastStreamQry = null; + streamState = null; + } - for (JdbcThinStatement s : stmts) - s.closeOnDisconnect(); + synchronized (stmtsMux) { + for (JdbcThinStatement s : stmts) + s.closeOnDisconnect(); - stmts.clear(); + stmts.clear(); + } } /** @@ -837,4 +844,212 @@ private static String normalizeSchema(String schemaName) { return res; } + + /** + * @param stmt Statement to close. + */ + void closeStatement(JdbcThinStatement stmt) { + synchronized (stmtsMux) { + stmts.remove(stmt); + } + } + + /** + * Streamer state and + */ + private class StreamState { + /** Maximum requests count that may be sent before any responses. */ + private static final int MAX_REQUESTS_BEFORE_RESPONSE = 10; + + /** Wait timeout. */ + private static final long WAIT_TIMEOUT = 1; + + /** Batch size for streaming. */ + private int streamBatchSize; + + /** Batch for streaming. */ + private List streamBatch; + + /** Last added query to recognize batches. */ + private String lastStreamQry; + + /** Keep request order on execution. */ + private long order; + + /** Async response reader thread. */ + private Thread asyncRespReaderThread; + + /** Async response error. */ + private volatile Exception err; + + /** The order of the last batch request at the stream. */ + private long lastRespOrder = -1; + + /** Last response future. */ + private final GridFutureAdapter lastRespFut = new GridFutureAdapter<>(); + + /** Response semaphore sem. 
*/ + private Semaphore respSem = new Semaphore(MAX_REQUESTS_BEFORE_RESPONSE); + + /** + * @param cmd Stream cmd. + */ + StreamState(SqlSetStreamingCommand cmd) { + streamBatchSize = cmd.batchSize(); + + asyncRespReaderThread = new Thread(this::readResponses); + + asyncRespReaderThread.start(); + } + + /** + * Add another query for batched execution. + * @param sql Query. + * @param args Arguments. + * @throws SQLException On error. + */ + void addBatch(String sql, List args) throws SQLException { + checkError(); + + boolean newQry = (args == null || !F.eq(lastStreamQry, sql)); + + // Providing null as SQL here allows for recognizing subbatches on server and handling them more efficiently. + JdbcQuery q = new JdbcQuery(newQry ? sql : null, args != null ? args.toArray() : null); + + if (streamBatch == null) + streamBatch = new ArrayList<>(streamBatchSize); + + streamBatch.add(q); + + // Null args means "addBatch(String)" was called on non-prepared Statement, + // we don't want to remember its query string. + lastStreamQry = (args != null ? sql : null); + + if (streamBatch.size() == streamBatchSize) + executeBatch(false); + } + + /** + * @param lastBatch Whether open data streamers must be flushed and closed after this batch. + * @throws SQLException if failed. + */ + private void executeBatch(boolean lastBatch) throws SQLException { + checkError(); + + if (lastBatch) + lastRespOrder = order; + + try { + respSem.acquire(); + + sendRequestNotWaitResponse( + new JdbcOrderedBatchExecuteRequest(schema, streamBatch, lastBatch, order)); + + streamBatch = null; + + lastStreamQry = null; + + if (lastBatch) { + try { + lastRespFut.get(); + } + catch (IgniteCheckedException e) { + // No-op. + // No exceptions are expected here. 
+ } + + checkError(); + } + else + order++; + } + catch (InterruptedException e) { + throw new SQLException("Streaming operation was interrupted", SqlStateCode.INTERNAL_ERROR, e); + } + } + + /** + * Throws at the user thread exception that was thrown at the {@link #asyncRespReaderThread} thread. + * @throws SQLException Saved exception. + */ + void checkError() throws SQLException { + if (err != null) { + Exception err0 = err; + + err = null; + + if (err0 instanceof SQLException) + throw (SQLException)err0; + else { + onDisconnect(); + + throw new SQLException("Failed to communicate with Ignite cluster on JDBC streaming.", + SqlStateCode.CONNECTION_FAILURE, err0); + } + } + } + + /** + * @throws SQLException On error. + */ + void close() throws SQLException { + close0(); + + checkError(); + } + + /** + */ + void close0() { + if (connected) { + try { + executeBatch(true); + } + catch (SQLException e) { + err = e; + + LOG.log(Level.WARNING, "Exception during batch send on streamed connection close", e); + } + } + + if (asyncRespReaderThread != null) + asyncRespReaderThread.interrupt(); + } + + /** + * + */ + void readResponses () { + try { + while (true) { + JdbcResponse resp = cliIo.readResponse(); + + if (resp.response() instanceof JdbcOrderedBatchExecuteResult) { + JdbcOrderedBatchExecuteResult res = (JdbcOrderedBatchExecuteResult)resp.response(); + + respSem.release(); + + if (res.errorCode() != ClientListenerResponse.STATUS_SUCCESS) { + err = new BatchUpdateException(res.errorMessage(), + IgniteQueryErrorCode.codeToSqlState(res.errorCode()), + res.errorCode(), res.updateCounts()); + } + + // Receive the response for the last request. 
+ if (res.order() == lastRespOrder) { + lastRespFut.onDone(); + + break; + } + } + + if (resp.status() != ClientListenerResponse.STATUS_SUCCESS) + err = new SQLException(resp.error(), IgniteQueryErrorCode.codeToSqlState(resp.status())); + } + } + catch (Exception e) { + err = e; + } + } + } } \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java index 30e446f65e3bf..96691d93bf91a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinStatement.java @@ -352,6 +352,8 @@ private JdbcResult sendFile(JdbcBulkLoadAckResult cmdRes) throws SQLException { try { closeResults(); + + conn.closeStatement(this); } finally { closed = true; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java index bfd553d3b93fa..9661e57f8018d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc/thin/JdbcThinTcpIo.java @@ -27,7 +27,8 @@ import java.sql.SQLException; import java.util.ArrayList; import java.util.List; -import java.util.Random; +import java.util.concurrent.atomic.AtomicLong; + import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.binary.BinaryReaderExImpl; import org.apache.ignite.internal.binary.BinaryWriterExImpl; @@ -38,6 +39,7 @@ import org.apache.ignite.internal.processors.odbc.ClientListenerRequest; import org.apache.ignite.internal.processors.odbc.SqlStateCode; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcBatchExecuteRequest; +import org.apache.ignite.internal.processors.odbc.jdbc.JdbcOrderedBatchExecuteRequest; import 
org.apache.ignite.internal.processors.odbc.jdbc.JdbcQuery; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryCloseRequest; import org.apache.ignite.internal.processors.odbc.jdbc.JdbcQueryFetchRequest; @@ -91,7 +93,7 @@ public class JdbcThinTcpIo { private static final int QUERY_CLOSE_MSG_SIZE = 9; /** Random. */ - private static final Random RND = new Random(U.currentTimeMillis()); + private static final AtomicLong IDX_GEN = new AtomicLong(); /** Connection properties. */ private final ConnectionProperties connProps; @@ -105,14 +107,23 @@ public class JdbcThinTcpIo { /** Input stream. */ private BufferedInputStream in; - /** Closed flag. */ - private boolean closed; + /** Connected flag. */ + private boolean connected; /** Ignite server version. */ private IgniteProductVersion igniteVer; - /** Address index. */ - private int srvIdx; + /** Ignite server version. */ + private Thread ownThread; + + /** Mutex. */ + private final Object mux = new Object(); + + /** Current protocol version used to connection to Ignite. */ + private ClientListenerProtocolVersion srvProtocolVer; + + /** Server index. */ + private volatile int srvIdx; /** * Constructor. @@ -121,9 +132,6 @@ public class JdbcThinTcpIo { */ public JdbcThinTcpIo(ConnectionProperties connProps) { this.connProps = connProps; - - // Try to connect to random address then round robin. - srvIdx = RND.nextInt(connProps.getAddresses().length); } /** @@ -140,66 +148,83 @@ public void start() throws SQLException, IOException { * @throws IOException On IO error in handshake. 
*/ public void start(int timeout) throws SQLException, IOException { - List inaccessibleAddrs = null; + synchronized (mux) { + if (ownThread != null) { + throw new SQLException("Concurrent access to JDBC connection is not allowed" + + " [ownThread=" + ownThread.getName() + + ", curThread=" + Thread.currentThread().getName(), SqlStateCode.CLIENT_CONNECTION_FAILED); + } + + ownThread = Thread.currentThread(); + } - List exceptions = null; + assert !connected; - HostAndPortRange[] srvs = connProps.getAddresses(); + try { + List inaccessibleAddrs = null; - boolean connected = false; + List exceptions = null; - for (int i = 0; i < srvs.length; i++, srvIdx = (srvIdx + 1) % srvs.length) { - HostAndPortRange srv = srvs[srvIdx]; + HostAndPortRange[] srvs = connProps.getAddresses(); - InetAddress[] addrs = getAllAddressesByHost(srv.host()); + for (int i = 0; i < srvs.length; i++) { + srvIdx = nextServerIndex(srvs.length); - for (InetAddress addr : addrs) { - for (int port = srv.portFrom(); port <= srv.portTo(); ++port) { - try { - connect(new InetSocketAddress(addr, port), timeout); + HostAndPortRange srv = srvs[srvIdx]; - connected = true; + InetAddress[] addrs = getAllAddressesByHost(srv.host()); - break; - } - catch (IOException | SQLException exception) { - if (inaccessibleAddrs == null) - inaccessibleAddrs = new ArrayList<>(); + for (InetAddress addr : addrs) { + for (int port = srv.portFrom(); port <= srv.portTo(); ++port) { + try { + connect(new InetSocketAddress(addr, port), timeout); + + break; + } + catch (IOException | SQLException exception) { + if (inaccessibleAddrs == null) + inaccessibleAddrs = new ArrayList<>(); - inaccessibleAddrs.add(addr.getHostName()); + inaccessibleAddrs.add(addr.getHostName()); - if (exceptions == null) - exceptions = new ArrayList<>(); + if (exceptions == null) + exceptions = new ArrayList<>(); - exceptions.add(exception); + exceptions.add(exception); + } } } + + if (connected) + break; } - if (connected) - break; - } + if (!connected 
&& inaccessibleAddrs != null && exceptions != null) { + if (exceptions.size() == 1) { + Exception ex = exceptions.get(0); - if (!connected && inaccessibleAddrs != null && exceptions != null) { - if (exceptions.size() == 1) { - Exception ex = exceptions.get(0); + if (ex instanceof SQLException) + throw (SQLException)ex; + else if (ex instanceof IOException) + throw (IOException)ex; + } - if (ex instanceof SQLException) - throw (SQLException)ex; - else if (ex instanceof IOException) - throw (IOException)ex; - } + SQLException e = new SQLException("Failed to connect to server [url=" + connProps.getUrl() + ']', + SqlStateCode.CLIENT_CONNECTION_FAILED); - SQLException e = new SQLException("Failed to connect to server [url=" + connProps.getUrl() + ']', - SqlStateCode.CLIENT_CONNECTION_FAILED); + for (Exception ex : exceptions) + e.addSuppressed(ex); - for (Exception ex : exceptions) - e.addSuppressed(ex); + throw e; + } - throw e; + handshake(CURRENT_VER); + } + finally { + synchronized (mux) { + ownThread = null; + } } - - handshake(CURRENT_VER); } /** @@ -211,43 +236,53 @@ else if (ex instanceof IOException) * @throws SQLException On connection reject. 
*/ private void connect(InetSocketAddress addr, int timeout) throws IOException, SQLException { - Socket sock; + Socket sock = null; - if (ConnectionProperties.SSL_MODE_REQUIRE.equalsIgnoreCase(connProps.getSslMode())) - sock = JdbcThinSSLUtil.createSSLSocket(addr, connProps); - else if (ConnectionProperties.SSL_MODE_DISABLE.equalsIgnoreCase(connProps.getSslMode())) { - sock = new Socket(); + try { + if (ConnectionProperties.SSL_MODE_REQUIRE.equalsIgnoreCase(connProps.getSslMode())) + sock = JdbcThinSSLUtil.createSSLSocket(addr, connProps); + else if (ConnectionProperties.SSL_MODE_DISABLE.equalsIgnoreCase(connProps.getSslMode())) { + sock = new Socket(); - try { - sock.connect(addr, timeout); + try { + sock.connect(addr, timeout); + } + catch (IOException e) { + throw new SQLException("Failed to connect to server [host=" + addr.getHostName() + + ", port=" + addr.getPort() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED, e); + } } - catch (IOException e) { - throw new SQLException("Failed to connect to server [host=" + addr.getHostName() + - ", port=" + addr.getPort() + ']', SqlStateCode.CLIENT_CONNECTION_FAILED, e); + else { + throw new SQLException("Unknown sslMode. [sslMode=" + connProps.getSslMode() + ']', + SqlStateCode.CLIENT_CONNECTION_FAILED); } - } - else { - throw new SQLException("Unknown sslMode. 
[sslMode=" + connProps.getSslMode() + ']', - SqlStateCode.CLIENT_CONNECTION_FAILED); - } - if (connProps.getSocketSendBuffer() != 0) - sock.setSendBufferSize(connProps.getSocketSendBuffer()); + if (connProps.getSocketSendBuffer() != 0) + sock.setSendBufferSize(connProps.getSocketSendBuffer()); - if (connProps.getSocketReceiveBuffer() != 0) - sock.setReceiveBufferSize(connProps.getSocketReceiveBuffer()); + if (connProps.getSocketReceiveBuffer() != 0) + sock.setReceiveBufferSize(connProps.getSocketReceiveBuffer()); - sock.setTcpNoDelay(connProps.isTcpNoDelay()); + sock.setTcpNoDelay(connProps.isTcpNoDelay()); - try { - endpoint = new IpcClientTcpEndpoint(sock); + try { + endpoint = new IpcClientTcpEndpoint(sock); + + out = new BufferedOutputStream(endpoint.outputStream()); + in = new BufferedInputStream(endpoint.inputStream()); - out = new BufferedOutputStream(endpoint.outputStream()); - in = new BufferedInputStream(endpoint.inputStream()); + connected = true; + } + catch (IgniteCheckedException e) { + throw new SQLException("Failed to connect to server [url=" + connProps.getUrl() + ']', + SqlStateCode.CLIENT_CONNECTION_FAILED, e); + } } - catch (IgniteCheckedException e) { - throw new SQLException("Failed to connect to server [url=" + connProps.getUrl() + ']', - SqlStateCode.CLIENT_CONNECTION_FAILED, e); + catch (Exception e) { + if (sock != null && !sock.isClosed()) + U.closeQuiet(sock); + + throw e; } } @@ -318,6 +353,8 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL } else igniteVer = new IgniteProductVersion((byte)2, (byte)0, (byte)0, "Unknown", 0L, null); + + srvProtocolVer = ver; } else { short maj = reader.readShort(); @@ -326,22 +363,22 @@ public void handshake(ClientListenerProtocolVersion ver) throws IOException, SQL String err = reader.readString(); - ClientListenerProtocolVersion srvProtocolVer = ClientListenerProtocolVersion.create(maj, min, maintenance); + ClientListenerProtocolVersion srvProtoVer0 = 
ClientListenerProtocolVersion.create(maj, min, maintenance); - if (srvProtocolVer.compareTo(VER_2_5_0) < 0 && !F.isEmpty(connProps.getUsername())) { - throw new SQLException("Authentication doesn't support by remote server[driverProtocolVer=" + CURRENT_VER + - ", remoteNodeProtocolVer=" + srvProtocolVer + ", err=" + err + ", url=" + connProps.getUrl() + ']', - SqlStateCode.CONNECTION_REJECTED); + if (srvProtoVer0.compareTo(VER_2_5_0) < 0 && !F.isEmpty(connProps.getUsername())) { + throw new SQLException("Authentication doesn't support by remote server[driverProtocolVer=" + + CURRENT_VER + ", remoteNodeProtocolVer=" + srvProtoVer0 + ", err=" + err + + ", url=" + connProps.getUrl() + ']', SqlStateCode.CONNECTION_REJECTED); } - if (VER_2_4_0.equals(srvProtocolVer) || VER_2_3_0.equals(srvProtocolVer) || - VER_2_1_5.equals(srvProtocolVer)) - handshake(srvProtocolVer); - else if (VER_2_1_0.equals(srvProtocolVer)) + if (VER_2_4_0.equals(srvProtoVer0) || VER_2_3_0.equals(srvProtoVer0) || + VER_2_1_5.equals(srvProtoVer0)) + handshake(srvProtoVer0); + else if (VER_2_1_0.equals(srvProtoVer0)) handshake_2_1_0(); else { throw new SQLException("Handshake failed [driverProtocolVer=" + CURRENT_VER + - ", remoteNodeProtocolVer=" + srvProtocolVer + ", err=" + err + ']', + ", remoteNodeProtocolVer=" + srvProtoVer0 + ", err=" + err + ']', SqlStateCode.CONNECTION_REJECTED); } } @@ -378,8 +415,11 @@ private void handshake_2_1_0() throws IOException, SQLException { boolean accepted = reader.readBoolean(); - if (accepted) + if (accepted) { igniteVer = new IgniteProductVersion((byte)2, (byte)1, (byte)0, "Unknown", 0L, null); + + srvProtocolVer = VER_2_1_0; + } else { short maj = reader.readShort(); short min = reader.readShort(); @@ -394,21 +434,86 @@ private void handshake_2_1_0() throws IOException, SQLException { } } + /** + * @param req Request. + * @throws IOException In case of IO error. + * @throws SQLException On error. 
+ */ + void sendBatchRequestNoWaitResponse(JdbcOrderedBatchExecuteRequest req) throws IOException, SQLException { + synchronized (mux) { + if (ownThread != null) { + throw new SQLException("Concurrent access to JDBC connection is not allowed" + + " [ownThread=" + ownThread.getName() + + ", curThread=" + Thread.currentThread().getName(), SqlStateCode.CONNECTION_FAILURE); + } + + ownThread = Thread.currentThread(); + } + + try { + if (!isUnorderedStreamSupported()) { + throw new SQLException("Streaming without response doesn't supported by server [driverProtocolVer=" + + CURRENT_VER + ", remoteNodeVer=" + igniteVer + ']', SqlStateCode.INTERNAL_ERROR); + } + + int cap = guessCapacity(req); + + BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), + null, null); + + req.writeBinary(writer); + + send(writer.array()); + } + finally { + synchronized (mux) { + ownThread = null; + } + } + } + /** * @param req Request. * @return Server response. * @throws IOException In case of IO error. + * @throws SQLException On concurrent access to JDBC connection. 
*/ @SuppressWarnings("unchecked") - JdbcResponse sendRequest(JdbcRequest req) throws IOException { - int cap = guessCapacity(req); + JdbcResponse sendRequest(JdbcRequest req) throws SQLException, IOException { + synchronized (mux) { + if (ownThread != null) { + throw new SQLException("Concurrent access to JDBC connection is not allowed" + + " [ownThread=" + ownThread.getName() + + ", curThread=" + Thread.currentThread().getName(), SqlStateCode.CONNECTION_FAILURE); + } + + ownThread = Thread.currentThread(); + } - BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), null, null); + try { + int cap = guessCapacity(req); - req.writeBinary(writer); + BinaryWriterExImpl writer = new BinaryWriterExImpl(null, new BinaryHeapOutputStream(cap), null, null); - send(writer.array()); + req.writeBinary(writer); + + send(writer.array()); + + return readResponse(); + } + finally { + synchronized (mux) { + ownThread = null; + } + } + } + /** + * @return Server response. + * @throws IOException In case of IO error. + */ + @SuppressWarnings("unchecked") + JdbcResponse readResponse() throws IOException { BinaryReaderExImpl reader = new BinaryReaderExImpl(null, new BinaryHeapInputStream(read()), null, null, false); JdbcResponse res = new JdbcResponse(); @@ -418,6 +523,7 @@ JdbcResponse sendRequest(JdbcRequest req) throws IOException { return res; } + /** * Try to guess request capacity. * @@ -503,7 +609,7 @@ private byte[] read(int size) throws IOException { * Close the client IO. */ public void close() { - if (closed) + if (!connected) return; // Clean up resources. @@ -513,7 +619,7 @@ public void close() { if (endpoint != null) endpoint.close(); - closed = true; + connected = false; } /** @@ -529,4 +635,36 @@ public ConnectionProperties connectionProperties() { IgniteProductVersion igniteVersion() { return igniteVer; } -} \ No newline at end of file + + /** + * @return {@code true} If the unordered streaming supported. 
+ */ + boolean isUnorderedStreamSupported() { + assert srvProtocolVer != null; + + return srvProtocolVer.compareTo(VER_2_5_0) >= 0; + } + + /** + * @return Current server index. + */ + public int serverIndex() { + return srvIdx; + } + + /** + * Get next server index. + * + * @param len Number of servers. + * @return Index of the next server to connect to. + */ + private static int nextServerIndex(int len) { + if (len == 1) + return 0; + else { + long nextIdx = IDX_GEN.getAndIncrement(); + + return (int)(nextIdx % len); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java index f61ccf9b3b046..c589c067d570b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcConnection.java @@ -435,9 +435,6 @@ private Ignite getIgnite(String cfgUrl) throws IgniteCheckedException { /** {@inheritDoc} */ @Override public void setReadOnly(boolean readOnly) throws SQLException { ensureNotClosed(); - - if (!readOnly) - throw new SQLFeatureNotSupportedException("Updates are not supported."); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java index 07034f4579e41..fce046dd752a9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcQueryTask.java @@ -174,7 +174,7 @@ public JdbcQueryTask(Ignite ignite, String cacheName, String schemaName, String if (fldQryCursor instanceof BulkLoadContextCursor) { fldQryCursor.close(); - + throw new SQLException("COPY command is currently supported only in thin JDBC driver."); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java 
b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java index e6e84880ea2eb..2025b90fec7f2 100755 --- a/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/jdbc2/JdbcResultSet.java @@ -20,6 +20,7 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; +import java.net.MalformedURLException; import java.net.URL; import java.sql.Array; import java.sql.Blob; @@ -37,6 +38,9 @@ import java.sql.Statement; import java.sql.Time; import java.sql.Timestamp; +import java.text.DecimalFormat; +import java.text.DecimalFormatSymbols; +import java.text.ParseException; import java.util.ArrayList; import java.util.Calendar; import java.util.Collections; @@ -55,6 +59,25 @@ * JDBC result set implementation. */ public class JdbcResultSet implements ResultSet { + /** Decimal format to convert streing to decimal. */ + private static final ThreadLocal decimalFormat = new ThreadLocal() { + /** {@inheritDoc} */ + @Override protected DecimalFormat initialValue() { + DecimalFormatSymbols symbols = new DecimalFormatSymbols(); + + symbols.setGroupingSeparator(','); + symbols.setDecimalSeparator('.'); + + String ptrn = "#,##0.0#"; + + DecimalFormat decimalFormat = new DecimalFormat(ptrn, symbols); + + decimalFormat.setParseBigDecimal(true); + + return decimalFormat; + } + }; + /** Is query. */ private final boolean isQry; @@ -189,7 +212,9 @@ else if (!finished) { } /** + * Fetch next results page. * + * @throws SQLException On error. */ private void fetchPage() throws SQLException { JdbcConnection conn = (JdbcConnection)stmt.getConnection(); @@ -253,81 +278,277 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public String getString(int colIdx) throws SQLException { - return getTypedValue(colIdx, String.class); + Object val = getValue(colIdx); + + return val == null ? 
null : String.valueOf(val); } /** {@inheritDoc} */ @Override public boolean getBoolean(int colIdx) throws SQLException { - Boolean val = getTypedValue(colIdx, Boolean.class); + Object val = getValue(colIdx); + + if (val == null) + return false; + + Class cls = val.getClass(); - return val != null ? val : false; + if (cls == Boolean.class) + return ((Boolean)val); + else if (val instanceof Number) + return ((Number)val).intValue() != 0; + else if (cls == String.class || cls == Character.class) { + try { + return Integer.parseInt(val.toString()) != 0; + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to boolean: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to boolean: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public byte getByte(int colIdx) throws SQLException { - Byte val = getTypedValue(colIdx, Byte.class); + Object val = getValue(colIdx); + + if (val == null) + return 0; - return val != null ? val : 0; + Class cls = val.getClass(); + + if (val instanceof Number) + return ((Number)val).byteValue(); + else if (cls == Boolean.class) + return (Boolean) val ? (byte) 1 : (byte) 0; + else if (cls == String.class || cls == Character.class) { + try { + return Byte.parseByte(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to byte: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to byte: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public short getShort(int colIdx) throws SQLException { - Short val = getTypedValue(colIdx, Short.class); + Object val = getValue(colIdx); + + if (val == null) + return 0; + + Class cls = val.getClass(); - return val != null ? val : 0; + if (val instanceof Number) + return ((Number) val).shortValue(); + else if (cls == Boolean.class) + return (Boolean) val ? 
(short) 1 : (short) 0; + else if (cls == String.class || cls == Character.class) { + try { + return Short.parseShort(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to short: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to short: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public int getInt(int colIdx) throws SQLException { - Integer val = getTypedValue(colIdx, Integer.class); + Object val = getValue(colIdx); - return val != null ? val : 0; + if (val == null) + return 0; + + Class cls = val.getClass(); + + if (val instanceof Number) + return ((Number) val).intValue(); + else if (cls == Boolean.class) + return (Boolean) val ? 1 : 0; + else if (cls == String.class || cls == Character.class) { + try { + return Integer.parseInt(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to int: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to int: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public long getLong(int colIdx) throws SQLException { - Long val = getTypedValue(colIdx, Long.class); + Object val = getValue(colIdx); + + if (val == null) + return 0; + + Class cls = val.getClass(); - return val != null ? val : 0; + if (val instanceof Number) + return ((Number)val).longValue(); + else if (cls == Boolean.class) + return (long) ((Boolean) val ? 
1 : 0); + else if (cls == String.class || cls == Character.class) { + try { + return Long.parseLong(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to long: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to long: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public float getFloat(int colIdx) throws SQLException { - Float val = getTypedValue(colIdx, Float.class); + Object val = getValue(colIdx); - return val != null ? val : 0; + if (val == null) + return 0; + + Class cls = val.getClass(); + + if (val instanceof Number) + return ((Number) val).floatValue(); + else if (cls == Boolean.class) + return (float) ((Boolean) val ? 1 : 0); + else if (cls == String.class || cls == Character.class) { + try { + return Float.parseFloat(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to float: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to float: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public double getDouble(int colIdx) throws SQLException { - Double val = getTypedValue(colIdx, Double.class); + Object val = getValue(colIdx); + + if (val == null) + return 0; + + Class cls = val.getClass(); - return val != null ? val : 0; + if (val instanceof Number) + return ((Number) val).doubleValue(); + else if (cls == Boolean.class) + return (double)((Boolean) val ? 
1 : 0); + else if (cls == String.class || cls == Character.class) { + try { + return Double.parseDouble(val.toString()); + } + catch (NumberFormatException e) { + throw new SQLException("Cannot convert to double: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to double: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public BigDecimal getBigDecimal(int colIdx, int scale) throws SQLException { - return getTypedValue(colIdx, BigDecimal.class); + BigDecimal val = getBigDecimal(colIdx); + + return val == null ? null : val.setScale(scale, BigDecimal.ROUND_HALF_UP); } /** {@inheritDoc} */ @Override public byte[] getBytes(int colIdx) throws SQLException { - return getTypedValue(colIdx, byte[].class); + Object val = getValue(colIdx); + + if (val == null) + return null; + + Class cls = val.getClass(); + + if (cls == byte[].class) + return (byte[])val; + else if (cls == Byte.class) + return new byte[] {(byte)val}; + else if (cls == Short.class) { + short x = (short)val; + + return new byte[] {(byte)(x >> 8), (byte)x}; + } + else if (cls == Integer.class) { + int x = (int)val; + + return new byte[] { (byte) (x >> 24), (byte) (x >> 16), (byte) (x >> 8), (byte) x}; + } + else if (cls == Long.class) { + long x = (long)val; + + return new byte[] {(byte) (x >> 56), (byte) (x >> 48), (byte) (x >> 40), (byte) (x >> 32), + (byte) (x >> 24), (byte) (x >> 16), (byte) (x >> 8), (byte) x}; + } + else if (cls == String.class) + return ((String)val).getBytes(); + else + throw new SQLException("Cannot convert to byte[]: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public Date getDate(int colIdx) throws SQLException { - return getTypedValue(colIdx, Date.class); + Object val = getValue(colIdx); + + if (val == null) + return null; + + Class cls = val.getClass(); + + if (cls == Date.class) + return (Date)val; + else if (cls == java.util.Date.class || cls == Time.class || cls == 
Timestamp.class) + return new Date(((java.util.Date)val).getTime()); + else + throw new SQLException("Cannot convert to date: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public Time getTime(int colIdx) throws SQLException { - return getTypedValue(colIdx, Time.class); + Object val = getValue(colIdx); + + if (val == null) + return null; + + Class cls = val.getClass(); + + if (cls == Time.class) + return (Time)val; + else if (cls == java.util.Date.class || cls == Date.class || cls == Timestamp.class) + return new Time(((java.util.Date)val).getTime()); + else + throw new SQLException("Cannot convert to time: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public Timestamp getTimestamp(int colIdx) throws SQLException { - return getTypedValue(colIdx, Timestamp.class); + Object val = getValue(colIdx); + + if (val == null) + return null; + + Class cls = val.getClass(); + + if (cls == Timestamp.class) + return (Timestamp)val; + else if (cls == java.util.Date.class || cls == Date.class || cls == Time.class) + return new Timestamp(((java.util.Date)val).getTime()); + else + throw new SQLException("Cannot convert to timestamp: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @@ -353,81 +574,93 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public String getString(String colLb) throws SQLException { - return getTypedValue(colLb, String.class); + int colIdx = findColumn(colLb); + + return getString(colIdx); } /** {@inheritDoc} */ @Override public boolean getBoolean(String colLb) throws SQLException { - Boolean val = getTypedValue(colLb, Boolean.class); + int colIdx = findColumn(colLb); - return val != null ? val : false; + return getBoolean(colIdx); } /** {@inheritDoc} */ @Override public byte getByte(String colLb) throws SQLException { - Byte val = getTypedValue(colLb, Byte.class); + int colIdx = findColumn(colLb); - return val != null ? 
val : 0; + return getByte(colIdx); } /** {@inheritDoc} */ @Override public short getShort(String colLb) throws SQLException { - Short val = getTypedValue(colLb, Short.class); + int colIdx = findColumn(colLb); - return val != null ? val : 0; + return getShort(colIdx); } /** {@inheritDoc} */ @Override public int getInt(String colLb) throws SQLException { - Integer val = getTypedValue(colLb, Integer.class); + int colIdx = findColumn(colLb); - return val != null ? val : 0; + return getInt(colIdx); } /** {@inheritDoc} */ @Override public long getLong(String colLb) throws SQLException { - Long val = getTypedValue(colLb, Long.class); + int colIdx = findColumn(colLb); - return val != null ? val : 0; + return getLong(colIdx); } /** {@inheritDoc} */ @Override public float getFloat(String colLb) throws SQLException { - Float val = getTypedValue(colLb, Float.class); + int colIdx = findColumn(colLb); - return val != null ? val : 0; + return getFloat(colIdx); } /** {@inheritDoc} */ @Override public double getDouble(String colLb) throws SQLException { - Double val = getTypedValue(colLb, Double.class); + int colIdx = findColumn(colLb); - return val != null ? 
val : 0; + return getDouble(colIdx); } /** {@inheritDoc} */ @Override public BigDecimal getBigDecimal(String colLb, int scale) throws SQLException { - return getTypedValue(colLb, BigDecimal.class); + int colIdx = findColumn(colLb); + + return getBigDecimal(colIdx, scale); } /** {@inheritDoc} */ @Override public byte[] getBytes(String colLb) throws SQLException { - return getTypedValue(colLb, byte[].class); + int colIdx = findColumn(colLb); + + return getBytes(colIdx); } /** {@inheritDoc} */ @Override public Date getDate(String colLb) throws SQLException { - return getTypedValue(colLb, Date.class); + int colIdx = findColumn(colLb); + + return getDate(colIdx); } /** {@inheritDoc} */ @Override public Time getTime(String colLb) throws SQLException { - return getTypedValue(colLb, Time.class); + int colIdx = findColumn(colLb); + + return getTime(colIdx); } /** {@inheritDoc} */ @Override public Timestamp getTimestamp(String colLb) throws SQLException { - return getTypedValue(colLb, Timestamp.class); + int colIdx = findColumn(colLb); + + return getTimestamp(colIdx); } /** {@inheritDoc} */ @@ -482,12 +715,14 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public Object getObject(int colIdx) throws SQLException { - return getTypedValue(colIdx, Object.class); + return getValue(colIdx); } /** {@inheritDoc} */ @Override public Object getObject(String colLb) throws SQLException { - return getTypedValue(colLb, Object.class); + int colIdx = findColumn(colLb); + + return getValue(colIdx); } /** {@inheritDoc} */ @@ -499,6 +734,8 @@ void closeInternal() throws SQLException { if (idx == -1) throw new SQLException("Column not found: " + colLb); + assert idx >= 0; + return idx + 1; } @@ -518,12 +755,36 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public BigDecimal getBigDecimal(int colIdx) throws SQLException { - return getTypedValue(colIdx, BigDecimal.class); + Object val = getValue(colIdx); + + if (val == null) + return 
null; + + Class cls = val.getClass(); + + if (cls == BigDecimal.class) + return (BigDecimal)val; + else if (val instanceof Number) + return new BigDecimal(((Number)val).doubleValue()); + else if (cls == Boolean.class) + return new BigDecimal((Boolean)val ? 1 : 0); + else if (cls == String.class || cls == Character.class) { + try { + return (BigDecimal)decimalFormat.get().parse(val.toString()); + } + catch (ParseException e) { + throw new SQLException("Cannot convert to BigDecimal: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to BigDecimal: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public BigDecimal getBigDecimal(String colLb) throws SQLException { - return getTypedValue(colLb, BigDecimal.class); + int colIdx = findColumn(colLb); + + return getBigDecimal(colIdx); } /** {@inheritDoc} */ @@ -997,7 +1258,7 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public Object getObject(int colIdx, Map> map) throws SQLException { - return getTypedValue(colIdx, Object.class); + throw new SQLFeatureNotSupportedException("SQL structured type are not supported."); } /** {@inheritDoc} */ @@ -1028,7 +1289,7 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public Object getObject(String colLb, Map> map) throws SQLException { - return getTypedValue(colLb, Object.class); + throw new SQLFeatureNotSupportedException("SQL structured type are not supported."); } /** {@inheritDoc} */ @@ -1059,42 +1320,62 @@ void closeInternal() throws SQLException { /** {@inheritDoc} */ @Override public Date getDate(int colIdx, Calendar cal) throws SQLException { - return getTypedValue(colIdx, Date.class); + return getDate(colIdx); } /** {@inheritDoc} */ @Override public Date getDate(String colLb, Calendar cal) throws SQLException { - return getTypedValue(colLb, Date.class); + return getDate(colLb); } /** {@inheritDoc} */ @Override public Time getTime(int colIdx, 
Calendar cal) throws SQLException { - return getTypedValue(colIdx, Time.class); + return getTime(colIdx); } /** {@inheritDoc} */ @Override public Time getTime(String colLb, Calendar cal) throws SQLException { - return getTypedValue(colLb, Time.class); + return getTime(colLb); } /** {@inheritDoc} */ @Override public Timestamp getTimestamp(int colIdx, Calendar cal) throws SQLException { - return getTypedValue(colIdx, Timestamp.class); + return getTimestamp(colIdx); } /** {@inheritDoc} */ @Override public Timestamp getTimestamp(String colLb, Calendar cal) throws SQLException { - return getTypedValue(colLb, Timestamp.class); + return getTimestamp(colLb); } /** {@inheritDoc} */ @Override public URL getURL(int colIdx) throws SQLException { - return getTypedValue(colIdx, URL.class); + Object val = getValue(colIdx); + + if (val == null) + return null; + + Class cls = val.getClass(); + + if (cls == URL.class) + return (URL)val; + else if (cls == String.class) { + try { + return new URL(val.toString()); + } + catch (MalformedURLException e) { + throw new SQLException("Cannot convert to URL: " + val, SqlStateCode.CONVERSION_FAILED, e); + } + } + else + throw new SQLException("Cannot convert to URL: " + val, SqlStateCode.CONVERSION_FAILED); } /** {@inheritDoc} */ @Override public URL getURL(String colLb) throws SQLException { - return getTypedValue(colLb, URL.class); + int colIdx = findColumn(colLb); + + return getURL(colIdx); } /** {@inheritDoc} */ @@ -1486,57 +1767,77 @@ void closeInternal() throws SQLException { } /** {@inheritDoc} */ - @Override public T getObject(int colIdx, Class type) throws SQLException { - return getTypedValue(colIdx, type); + @Override public T getObject(int colIdx, Class targetCls) throws SQLException { + return (T)getObject0(colIdx, targetCls); } /** {@inheritDoc} */ - @Override public T getObject(String colLb, Class type) throws SQLException { - return getTypedValue(colLb, type); + @Override public T getObject(String colLb, Class targetCls) throws 
SQLException { + int colIdx = findColumn(colLb); + + return getObject(colIdx, targetCls); } /** - * Gets casted field value by label. - * - * @param colLb Column label. - * @param cls Value class. - * @return Casted field value. - * @throws SQLException In case of error. + * @param colIdx Column index. + * @param targetCls Class representing the Java data type to convert the designated column to. + * @return Converted object. + * @throws SQLException On error. */ - private T getTypedValue(String colLb, Class cls) throws SQLException { - ensureNotClosed(); - ensureHasCurrentRow(); - - String name = colLb.toUpperCase(); - - Integer idx = stmt.fieldsIdxs.get(name); - - int colIdx; - - if (idx != null) - colIdx = idx; + private Object getObject0(int colIdx, Class targetCls) throws SQLException { + if (targetCls == Boolean.class) + return getBoolean(colIdx); + else if (targetCls == Byte.class) + return getByte(colIdx); + else if (targetCls == Short.class) + return getShort(colIdx); + else if (targetCls == Integer.class) + return getInt(colIdx); + else if (targetCls == Long.class) + return getLong(colIdx); + else if (targetCls == Float.class) + return getFloat(colIdx); + else if (targetCls == Double.class) + return getDouble(colIdx); + else if (targetCls == String.class) + return getString(colIdx); + else if (targetCls == BigDecimal.class) + return getBigDecimal(colIdx); + else if (targetCls == Date.class) + return getDate(colIdx); + else if (targetCls == Time.class) + return getTime(colIdx); + else if (targetCls == Timestamp.class) + return getTimestamp(colIdx); + else if (targetCls == byte[].class) + return getBytes(colIdx); + else if (targetCls == URL.class) + return getURL(colIdx); else { - colIdx = cols.indexOf(name) + 1; + Object val = getValue(colIdx); - if (colIdx <= 0) - throw new SQLException("Invalid column label: " + colLb); + if (val == null) + return null; - stmt.fieldsIdxs.put(name, colIdx); - } + Class cls = val.getClass(); - return getTypedValue(colIdx, 
cls); + if (targetCls == cls) + return val; + else + throw new SQLException("Cannot convert to " + targetCls.getName() + ": " + val, + SqlStateCode.CONVERSION_FAILED); + } } /** - * Gets casted field value by index. + * Gets object field value by index. * * @param colIdx Column index. - * @param cls Value class. - * @return Casted field value. + * @return Object field value. * @throws SQLException In case of error. */ @SuppressWarnings("unchecked") - private T getTypedValue(int colIdx, Class cls) throws SQLException { + private Object getValue(int colIdx) throws SQLException { ensureNotClosed(); ensureHasCurrentRow(); @@ -1545,18 +1846,10 @@ private T getTypedValue(int colIdx, Class cls) throws SQLException { wasNull = val == null; - if (val == null) - return null; - else if (cls == String.class) - return (T)String.valueOf(val); - else - return cls.cast(val); - } - catch (IndexOutOfBoundsException ignored) { - throw new SQLException("Invalid column index: " + colIdx); + return val; } - catch (ClassCastException ignored) { - throw new SQLException("Cannot convert to " + cls.getSimpleName().toLowerCase(), SqlStateCode.CONVERSION_FAILED); + catch (IndexOutOfBoundsException e) { + throw new SQLException("Invalid column index: " + colIdx, e); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/GridManagerAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/GridManagerAdapter.java index 74f5a102d2f19..b0756cfe80801 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/GridManagerAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/GridManagerAdapter.java @@ -618,6 +618,14 @@ protected final String stopInfo() { return ctx.nodeAttributes(); } + @Override public boolean communicationFailureResolveSupported() { + return ctx.discovery().communicationErrorResolveSupported(); + } + + @Override public void resolveCommunicationFailure(ClusterNode node, Exception err) { + 
ctx.discovery().resolveCommunicationError(node, err); + } + /** * @param e Exception to handle. * @return GridSpiException Converted exception. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java new file mode 100644 index 0000000000000..757e17d102739 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/IgniteMBeansManager.java @@ -0,0 +1,275 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers; + +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutorService; +import javax.management.JMException; +import javax.management.ObjectName; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.internal.ClusterLocalNodeMetricsMXBeanImpl; +import org.apache.ignite.internal.ClusterMetricsMXBeanImpl; +import org.apache.ignite.internal.GridKernalContextImpl; +import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.StripedExecutorMXBeanAdapter; +import org.apache.ignite.internal.ThreadPoolMXBeanAdapter; +import org.apache.ignite.internal.TransactionMetricsMxBeanImpl; +import org.apache.ignite.internal.TransactionsMXBeanImpl; +import org.apache.ignite.internal.processors.cache.persistence.DataStorageMXBeanImpl; +import org.apache.ignite.internal.stat.IoStatisticsMetricsLocalMXBeanImpl; +import org.apache.ignite.internal.util.StripedExecutor; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.internal.worker.FailureHandlingMxBeanImpl; +import org.apache.ignite.internal.worker.WorkersControlMXBeanImpl; +import org.apache.ignite.internal.worker.WorkersRegistry; +import org.apache.ignite.mxbean.ClusterMetricsMXBean; +import org.apache.ignite.mxbean.DataStorageMXBean; +import org.apache.ignite.mxbean.FailureHandlingMxBean; +import org.apache.ignite.mxbean.IgniteMXBean; +import org.apache.ignite.mxbean.IoStatisticsMetricsMXBean; +import org.apache.ignite.mxbean.StripedExecutorMXBean; +import org.apache.ignite.mxbean.ThreadPoolMXBean; +import org.apache.ignite.mxbean.TransactionMetricsMxBean; +import org.apache.ignite.mxbean.TransactionsMXBean; +import org.apache.ignite.mxbean.WorkersControlMXBean; +import org.apache.ignite.thread.IgniteStripedThreadPoolExecutor; +import org.jetbrains.annotations.Nullable; + +/** + * Class that registers and 
unregisters MBeans for kernal. + */ +public class IgniteMBeansManager { + /** Ignite kernal */ + private final IgniteKernal kernal; + + /** Ignite kernal context. */ + private final GridKernalContextImpl ctx; + + /** Logger. */ + private final IgniteLogger log; + + /** MBean names stored to be unregistered later. */ + private final Set mBeanNames = new HashSet<>(); + + /** + * @param kernal Grid kernal. + */ + public IgniteMBeansManager(IgniteKernal kernal) { + this.kernal = kernal; + ctx = (GridKernalContextImpl)kernal.context(); + log = ctx.log(IgniteMBeansManager.class); + } + + /** + * Registers all kernal MBeans (for kernal, metrics, thread pools). + * + * @param utilityCachePool Utility cache pool + * @param execSvc Executor service + * @param sysExecSvc System executor service + * @param stripedExecSvc Striped executor + * @param p2pExecSvc P2P executor service + * @param mgmtExecSvc Management executor service + * @param igfsExecSvc IGFS executor service + * @param dataStreamExecSvc data stream executor service + * @param restExecSvc Reset executor service + * @param affExecSvc Affinity executor service + * @param idxExecSvc Indexing executor service + * @param callbackExecSvc Callback executor service + * @param qryExecSvc Query executor service + * @param schemaExecSvc Schema executor service + * @param customExecSvcs Custom named executors + * @throws IgniteCheckedException if fails to register any of the MBeans + */ + public void registerAllMBeans( + ExecutorService utilityCachePool, + final ExecutorService execSvc, + final ExecutorService svcExecSvc, + final ExecutorService sysExecSvc, + final StripedExecutor stripedExecSvc, + ExecutorService p2pExecSvc, + ExecutorService mgmtExecSvc, + ExecutorService igfsExecSvc, + StripedExecutor dataStreamExecSvc, + ExecutorService restExecSvc, + ExecutorService affExecSvc, + @Nullable ExecutorService idxExecSvc, + IgniteStripedThreadPoolExecutor callbackExecSvc, + ExecutorService qryExecSvc, + ExecutorService 
schemaExecSvc, + @Nullable final Map customExecSvcs, + WorkersRegistry workersRegistry + ) throws IgniteCheckedException { + if (U.IGNITE_MBEANS_DISABLED) + return; + + // Kernal + registerMBean("Kernal", IgniteKernal.class.getSimpleName(), kernal, IgniteMXBean.class); + + // Metrics + ClusterMetricsMXBean locMetricsBean = new ClusterLocalNodeMetricsMXBeanImpl(ctx.discovery()); + registerMBean("Kernal", locMetricsBean.getClass().getSimpleName(), locMetricsBean, ClusterMetricsMXBean.class); + ClusterMetricsMXBean metricsBean = new ClusterMetricsMXBeanImpl(kernal.cluster()); + registerMBean("Kernal", metricsBean.getClass().getSimpleName(), metricsBean, ClusterMetricsMXBean.class); + + //IO metrics + IoStatisticsMetricsMXBean ioStatMetricsBean = new IoStatisticsMetricsLocalMXBeanImpl(ctx.ioStats()); + registerMBean("IOMetrics", ioStatMetricsBean.getClass().getSimpleName(), ioStatMetricsBean, IoStatisticsMetricsMXBean.class); + + // Transaction metrics + TransactionMetricsMxBean txMetricsMXBean = new TransactionMetricsMxBeanImpl(ctx.cache().transactions().metrics()); + registerMBean("TransactionMetrics", txMetricsMXBean.getClass().getSimpleName(), txMetricsMXBean, TransactionMetricsMxBean.class); + + // Transactions + TransactionsMXBean txMXBean = new TransactionsMXBeanImpl(ctx); + registerMBean("Transactions", txMXBean.getClass().getSimpleName(), txMXBean, TransactionsMXBean.class); + + // Data storage + DataStorageMXBean dataStorageMXBean = new DataStorageMXBeanImpl(ctx); + registerMBean("DataStorage", dataStorageMXBean.getClass().getSimpleName(), dataStorageMXBean, DataStorageMXBean.class); + + // Executors + registerExecutorMBean("GridUtilityCacheExecutor", utilityCachePool); + registerExecutorMBean("GridExecutionExecutor", execSvc); + registerExecutorMBean("GridServicesExecutor", svcExecSvc); + registerExecutorMBean("GridSystemExecutor", sysExecSvc); + registerExecutorMBean("GridClassLoadingExecutor", p2pExecSvc); + registerExecutorMBean("GridManagementExecutor", 
mgmtExecSvc); + registerExecutorMBean("GridIgfsExecutor", igfsExecSvc); + registerExecutorMBean("GridDataStreamExecutor", dataStreamExecSvc); + registerExecutorMBean("GridAffinityExecutor", affExecSvc); + registerExecutorMBean("GridCallbackExecutor", callbackExecSvc); + registerExecutorMBean("GridQueryExecutor", qryExecSvc); + registerExecutorMBean("GridSchemaExecutor", schemaExecSvc); + + if (idxExecSvc != null) + registerExecutorMBean("GridIndexingExecutor", idxExecSvc); + + if (ctx.config().getConnectorConfiguration() != null) + registerExecutorMBean("GridRestExecutor", restExecSvc); + + if (stripedExecSvc != null) { + // striped executor uses a custom adapter + registerMBean("Thread Pools", + "StripedExecutor", + new StripedExecutorMXBeanAdapter(stripedExecSvc), + StripedExecutorMXBean.class); + } + + if (customExecSvcs != null) { + for (Map.Entry entry : customExecSvcs.entrySet()) + registerExecutorMBean(entry.getKey(), entry.getValue()); + } + + if (U.IGNITE_TEST_FEATURES_ENABLED) { + WorkersControlMXBean workerCtrlMXBean = new WorkersControlMXBeanImpl(workersRegistry); + + registerMBean("Kernal", workerCtrlMXBean.getClass().getSimpleName(), + workerCtrlMXBean, WorkersControlMXBean.class); + } + + FailureHandlingMxBean blockOpCtrlMXBean = new FailureHandlingMxBeanImpl(workersRegistry, + ctx.cache().context().database()); + + registerMBean("Kernal", blockOpCtrlMXBean.getClass().getSimpleName(), blockOpCtrlMXBean, + FailureHandlingMxBean.class); + + if (ctx.query().moduleEnabled()) + ctx.query().getIndexing().registerMxBeans(this); + } + + /** + * Registers a {@link ThreadPoolMXBean} for an executor. + * + * @param name name of the bean to register + * @param exec executor to register a bean for + * @throws IgniteCheckedException if registration fails. 
+ */ + private void registerExecutorMBean(String name, ExecutorService exec) throws IgniteCheckedException { + registerMBean("Thread Pools", name, new ThreadPoolMXBeanAdapter(exec), ThreadPoolMXBean.class); + } + + /** + * Register an Ignite MBean. + * + * @param grp bean group name + * @param name bean name + * @param impl bean implementation + * @param itf bean interface + * @param bean type + * @throws IgniteCheckedException if registration fails + */ + public void registerMBean(String grp, String name, T impl, Class itf) throws IgniteCheckedException { + assert !U.IGNITE_MBEANS_DISABLED; + + try { + ObjectName objName = U.registerMBean( + ctx.config().getMBeanServer(), + ctx.config().getIgniteInstanceName(), + grp, name, impl, itf); + + if (log.isDebugEnabled()) + log.debug("Registered MBean: " + objName); + + mBeanNames.add(objName); + } + catch (JMException e) { + throw new IgniteCheckedException("Failed to register MBean " + name, e); + } + } + + /** + * Unregisters all previously registered MBeans. + * + * @return {@code true} if all mbeans were unregistered successfully; {@code false} otherwise. + */ + public boolean unregisterAllMBeans() { + boolean success = true; + + for (ObjectName name : mBeanNames) + success = success && unregisterMBean(name); + + return success; + } + + /** + * Unregisters given MBean. + * + * @param mbean MBean to unregister. + * @return {@code true} if successfully unregistered, {@code false} otherwise. 
+ */ + public boolean unregisterMBean(ObjectName mbean) { + assert !U.IGNITE_MBEANS_DISABLED; + + try { + ctx.config().getMBeanServer().unregisterMBean(mbean); + + if (log.isDebugEnabled()) + log.debug("Unregistered MBean: " + mbean); + + return true; + } + catch (JMException e) { + U.error(log, "Failed to unregister MBean.", e); + + return false; + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java index f0b19f3741270..6337d533f192c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointManager.java @@ -188,8 +188,7 @@ public boolean storeCheckpoint(GridTaskSessionInternal ses, U.warn(log, S.toString("Checkpoint will not be saved due to session invalidation", "key", key, true, "val", state, true, - "ses", ses, false), - "Checkpoint will not be saved due to session invalidation."); + "ses", ses, false)); break; } @@ -198,8 +197,7 @@ public boolean storeCheckpoint(GridTaskSessionInternal ses, U.warn(log, S.toString("Checkpoint will not be saved due to session timeout", "key", key, true, "val", state, true, - "ses", ses, false), - "Checkpoint will not be saved due to session timeout."); + "ses", ses, false)); break; } @@ -224,8 +222,7 @@ public boolean storeCheckpoint(GridTaskSessionInternal ses, U.warn(log, S.toString("Checkpoint will not be saved due to session invalidation", "key", key, true, "val", state, true, - "ses", ses, false), - "Checkpoint will not be saved due to session invalidation."); + "ses", ses, false)); keyMap.remove(ses.getId(), keys); @@ -508,4 +505,4 @@ private class CheckpointRequestListener implements GridMessageListener { } } } -} \ No newline at end of file +} diff --git 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointRequest.java index 8b21ff2fe5382..4b25e0b687662 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/checkpoint/GridCheckpointRequest.java @@ -177,4 +177,4 @@ public String getCheckpointSpi() { @Override public String toString() { return S.toString(GridCheckpointRequest.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java index d5cdd2dd7fc53..71db0fc8d06f8 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoManager.java @@ -59,6 +59,7 @@ import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteComponentType; import org.apache.ignite.internal.IgniteDeploymentCheckedException; +import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.direct.DirectMessageReader; @@ -69,6 +70,8 @@ import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.platform.message.PlatformMessageFilter; import org.apache.ignite.internal.processors.pool.PoolProcessor; +import org.apache.ignite.internal.processors.security.OperationSecurityContext; +import org.apache.ignite.internal.processors.security.SecurityContext; import 
org.apache.ignite.internal.processors.timeout.GridTimeoutObject; import org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashSet; import org.apache.ignite.internal.util.StripedCompositeReadWriteLock; @@ -78,6 +81,7 @@ import org.apache.ignite.internal.util.lang.IgnitePair; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.S; @@ -96,6 +100,7 @@ import org.apache.ignite.spi.communication.CommunicationListener; import org.apache.ignite.spi.communication.CommunicationSpi; import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; @@ -133,7 +138,7 @@ public class GridIoManager extends GridManagerAdapter CUR_PLC = new ThreadLocal<>(); @@ -203,11 +208,7 @@ public class GridIoManager extends GridManagerAdapter {}; /** * @param ctx Grid kernal context. @@ -298,9 +299,9 @@ public void resetMetrics() { @Override public MessageReader reader(UUID rmtNodeId, MessageFactory msgFactory) throws IgniteCheckedException { - assert rmtNodeId != null; - return new DirectMessageReader(msgFactory, U.directProtocolVersion(ctx, rmtNodeId)); + return new DirectMessageReader(msgFactory, + rmtNodeId != null ? 
U.directProtocolVersion(ctx, rmtNodeId) : GridIoManager.DIRECT_PROTO_VER); } }; } @@ -1048,7 +1049,7 @@ private void processP2PMessage( assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } finally { threadProcessingMessage(false, null); @@ -1090,7 +1091,11 @@ private void processRegularMessage( processRegularMessage0(msg, nodeId); } - finally { + catch (Throwable e) { + log.error("An error occurred processing the message [msg=" + msg + ", nodeId=" + nodeId + "].", e); + + throw e; + } finally { threadProcessingMessage(false, null); msgC.run(); @@ -1181,7 +1186,7 @@ private void processRegularMessage0(GridIoMessage msg, UUID nodeId) { assert obj != null; - invokeListener(msg.policy(), lsnr, nodeId, obj); + invokeListener(msg.policy(), lsnr, nodeId, obj, secSubj(msg)); } /** @@ -1543,8 +1548,9 @@ private void unwindMessageSet(GridCommunicationMessageSet msgSet, GridMessageLis * @param lsnr Listener. * @param nodeId Node ID. * @param msg Message. + * @param secCtxMsg Security subject that will be used to open a security session. */ - private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg) { + private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Object msg, @Nullable T2 secCtxMsg) { Byte oldPlc = CUR_PLC.get(); boolean change = !F.eq(oldPlc, plc); @@ -1552,7 +1558,10 @@ private void invokeListener(Byte plc, GridMessageListener lsnr, UUID nodeId, Obj if (change) CUR_PLC.set(plc); - try { + SecurityContext secCtx = secCtxMsg != null ? secCtxMsg.get2() : null; + UUID newSecSubjId = secCtxMsg != null && secCtxMsg.get1() != null ? secCtxMsg.get1() : nodeId; + + try (OperationSecurityContext s = secCtx != null ? 
ctx.security().withContext(secCtx) : ctx.security().withContext(newSecSubjId)) { lsnr.onMessage(nodeId, msg, plc); } finally { @@ -1614,7 +1623,7 @@ private void send( assert !async || msg instanceof GridIoUserMessage : msg; // Async execution was added only for IgniteMessaging. assert topicOrd >= 0 || !(topic instanceof GridTopic) : msg; - GridIoMessage ioMsg = new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + GridIoMessage ioMsg = createGridIoMessage(topic, topicOrd, msg, plc, ordered, timeout, skipOnTimeout); if (locNodeId.equals(node.id())) { assert plc != P2P_POOL; @@ -1656,6 +1665,38 @@ else if (async) } } + /** + * @return One of two message wrappers. The first is {@link GridIoMessage}, the second is secured version {@link + * GridIoSecurityAwareMessage}. + */ + private @NotNull GridIoMessage createGridIoMessage( + Object topic, + int topicOrd, + Message msg, + byte plc, + boolean ordered, + long timeout, + boolean skipOnTimeout) throws IgniteCheckedException { + boolean securityMsgSupported = IgniteFeatures.allNodesSupports(ctx.discovery().allNodes(), IgniteFeatures.IGNITE_SECURITY_PROCESSOR); + + if (ctx.security().enabled() && securityMsgSupported) { + UUID secSubjId = null; + + SecurityContext secCtx = ctx.security().securityContext(); + UUID curSecSubjId = secCtx.subject().id(); + + if (!locNodeId.equals(curSecSubjId)) + secSubjId = curSecSubjId; + + //Network optimization + byte[] secSubject = secSubjId != null && ctx.discovery().node(secSubjId) == null ? U.marshal(marsh, secCtx) : null; + + return new GridIoSecurityAwareMessage(secSubjId, secSubject, plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + + return new GridIoMessage(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + } + /** * @param nodeId Id of destination node. * @param topic Topic to send the message to. @@ -1964,11 +2005,24 @@ else if (loc) { } /** + * Subscribe at messages from a topic. + * * @param topic Topic to subscribe to. 
* @param p Message predicate. */ - @SuppressWarnings("unchecked") - public void addUserMessageListener(@Nullable final Object topic, @Nullable final IgniteBiPredicate p) { + public void addUserMessageListener(final @Nullable Object topic, final @Nullable IgniteBiPredicate p) { + addUserMessageListener(topic, p, null); + } + + /** + * @param topic Topic to subscribe to. + * @param p Message predicate. + */ + public void addUserMessageListener( + final @Nullable Object topic, + final @Nullable IgniteBiPredicate p, + final @Nullable UUID nodeId + ) { if (p != null) { try { if (p instanceof PlatformMessageFilter) @@ -1977,7 +2031,7 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final ctx.resource().injectGeneric(p); addMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); + new GridUserMessageListener(topic, (IgniteBiPredicate)p, nodeId)); } catch (IgniteCheckedException e) { throw new IgniteException(e); @@ -1991,13 +2045,8 @@ public void addUserMessageListener(@Nullable final Object topic, @Nullable final */ @SuppressWarnings("unchecked") public void removeUserMessageListener(@Nullable Object topic, IgniteBiPredicate p) { - try { - removeMessageListener(TOPIC_COMM_USER, - new GridUserMessageListener(topic, (IgniteBiPredicate)p)); - } - catch (IgniteCheckedException e) { - throw new IgniteException(e); - } + removeMessageListener(TOPIC_COMM_USER, + new GridUserMessageListener(topic, (IgniteBiPredicate)p)); } /** @@ -2416,15 +2465,27 @@ private class GridUserMessageListener implements GridMessageListener { /** User message topic. */ private final Object topic; + /** Initial node id. */ + private final UUID initNodeId; + /** * @param topic User topic. * @param predLsnr Predicate listener. - * @throws IgniteCheckedException If failed to inject resources to predicates. + * @param initNodeId Node id that registered given listener. 
*/ - GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) - throws IgniteCheckedException { + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr, + @Nullable UUID initNodeId) { this.topic = topic; this.predLsnr = predLsnr; + this.initNodeId = initNodeId; + } + + /** + * @param topic User topic. + * @param predLsnr Predicate listener. + */ + GridUserMessageListener(@Nullable Object topic, @Nullable IgniteBiPredicate predLsnr) { + this(topic, predLsnr, null); } /** {@inheritDoc} */ @@ -2521,8 +2582,10 @@ private class GridUserMessageListener implements GridMessageListener { if (msgBody != null) { if (predLsnr != null) { - if (!predLsnr.apply(nodeId, msgBody)) - removeMessageListener(TOPIC_COMM_USER, this); + try(OperationSecurityContext s = ctx.security().withContext(initNodeId)) { + if (!predLsnr.apply(nodeId, msgBody)) + removeMessageListener(TOPIC_COMM_USER, this); + } } } } @@ -2749,7 +2812,7 @@ void unwind(GridMessageListener lsnr) { for (GridTuple3 t = msgs.poll(); t != null; t = msgs.poll()) { try { - invokeListener(plc, lsnr, nodeId, t.get1().message()); + invokeListener(plc, lsnr, nodeId, t.get1().message(), secSubj(t.get1())); } finally { if (t.get3() != null) @@ -3145,4 +3208,31 @@ public long binLatencyMcs() { return latencyLimit / (1000 * (resLatency.length - 1)); } } + + /** + * @param msg Communication message. + * @return A pair that represents a security subject id and security context. The returned value can be {@code null} + * in case of security context is not enabled. + */ + private T2 secSubj(GridIoMessage msg) { + if (ctx.security().enabled() && msg instanceof GridIoSecurityAwareMessage) { + GridIoSecurityAwareMessage secMsg = (GridIoSecurityAwareMessage)msg; + + SecurityContext secCtx = null; + + try { + secCtx = secMsg.getSecCtx() != null ? 
U.unmarshal(marsh, secMsg.getSecCtx(), U.resolveClassLoader(ctx.config())) : null; + } + catch (IgniteCheckedException e) { + log.error("Security context unmarshaled with error.", e); + } + + return new T2<>( + secMsg.secSubjId(), + secCtx + ); + } + + return null; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java index a0fc2f8088287..3c3f2a0f59c8d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoMessageFactory.java @@ -53,6 +53,11 @@ import org.apache.ignite.internal.processors.cache.WalStateAckMessage; import org.apache.ignite.internal.processors.cache.binary.MetadataRequestMessage; import org.apache.ignite.internal.processors.cache.binary.MetadataResponseMessage; +import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionCountersNeighborcastRequest; +import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionCountersNeighborcastResponse; +import org.apache.ignite.internal.processors.cache.distributed.dht.PartitionUpdateCountersMessage; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.latch.LatchAckMessage; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionSupplyMessageV2; import org.apache.ignite.internal.processors.cache.distributed.GridCacheTtlUpdateRequest; import org.apache.ignite.internal.processors.cache.distributed.GridCacheTxRecoveryRequest; import org.apache.ignite.internal.processors.cache.distributed.GridCacheTxRecoveryResponse; @@ -123,6 +128,8 @@ import org.apache.ignite.internal.processors.cache.version.GridCacheRawVersionedEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import 
org.apache.ignite.internal.processors.cache.version.GridCacheVersionEx; +import org.apache.ignite.internal.processors.cluster.ClusterMetricsUpdateMessage; +import org.apache.ignite.internal.processors.continuous.ContinuousRoutineStartResultMessage; import org.apache.ignite.internal.processors.continuous.GridContinuousMessage; import org.apache.ignite.internal.processors.datastreamer.DataStreamerEntry; import org.apache.ignite.internal.processors.datastreamer.DataStreamerRequest; @@ -162,6 +169,7 @@ import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; import org.apache.ignite.spi.communication.tcp.messages.HandshakeMessage; import org.apache.ignite.spi.communication.tcp.messages.HandshakeMessage2; +import org.apache.ignite.spi.communication.tcp.messages.HandshakeWaitMessage; import org.apache.ignite.spi.communication.tcp.messages.NodeIdMessage; import org.apache.ignite.spi.communication.tcp.messages.RecoveryLastReceivedMessage; @@ -314,6 +322,11 @@ public GridIoMessageFactory(MessageFactory[] ext) { break; + case TcpCommunicationSpi.HANDSHAKE_WAIT_MSG_TYPE: + msg = new HandshakeWaitMessage(); + + break; + case 0: msg = new GridJobCancelRequest(); @@ -864,6 +877,7 @@ public GridIoMessageFactory(MessageFactory[] ext) { break; + // [120..123] - DR case 124: msg = new GridMessageCollection<>(); @@ -909,7 +923,47 @@ public GridIoMessageFactory(MessageFactory[] ext) { break; - // [-3..119] [124..129] [-23..-27] [-36..-55]- this + case 133: + msg = new ClusterMetricsUpdateMessage(); + + break; + + case 134: + msg = new ContinuousRoutineStartResultMessage(); + + break; + + case 135: + msg = new LatchAckMessage(); + + break; + + case 157: + msg = new PartitionUpdateCountersMessage(); + + break; + + case 158: + msg = new GridDhtPartitionSupplyMessageV2(); + + break; + + case 165: + msg = new PartitionCountersNeighborcastRequest(); + + break; + + case 166: + msg = new PartitionCountersNeighborcastResponse(); + + break; + + case 
GridIoSecurityAwareMessage.TYPE_CODE: + msg = new GridIoSecurityAwareMessage(); + + break; + + // [-3..119] [124..129] [-23..-28] [-36..-55] - this // [120..123] - DR // [-4..-22, -30..-35] - SQL // [2048..2053] - Snapshots diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java new file mode 100644 index 0000000000000..825644ddb0bf7 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoSecurityAwareMessage.java @@ -0,0 +1,163 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.communication; + +import java.io.Externalizable; +import java.nio.ByteBuffer; +import java.util.UUID; +import org.apache.ignite.plugin.extensions.communication.Message; +import org.apache.ignite.plugin.extensions.communication.MessageReader; +import org.apache.ignite.plugin.extensions.communication.MessageWriter; + +/** + * Represents a security communication message. 
+ */ +public class GridIoSecurityAwareMessage extends GridIoMessage { + /** */ + private static final long serialVersionUID = 0L; + /** */ + public static final short TYPE_CODE = 174; + + /** Security subject id that will be used during message processing on an remote node. */ + private UUID secSubjId; + + /** Security context transmitting from node initiator of action. */ + private byte[] secCtx; + + /** + * No-op constructor to support {@link Externalizable} interface. + * This constructor is not meant to be used for other purposes. + */ + public GridIoSecurityAwareMessage() { + // No-op. + } + + /** + * @param secSubjId Security subject id. + * @param plc Policy. + * @param topic Communication topic. + * @param topicOrd Topic ordinal value. + * @param msg Message. + * @param ordered Message ordered flag. + * @param timeout Timeout. + * @param skipOnTimeout Whether message can be skipped on timeout. + */ + public GridIoSecurityAwareMessage( + UUID secSubjId, + byte[] secSubject, + byte plc, + Object topic, + int topicOrd, + Message msg, + boolean ordered, + long timeout, + boolean skipOnTimeout) { + super(plc, topic, topicOrd, msg, ordered, timeout, skipOnTimeout); + + this.secSubjId = secSubjId; + this.secCtx = secSubject; + } + + /** + * @return Security subject id. 
+ */ + UUID secSubjId() { + return secSubjId; + } + + /** + * @return Security context + */ + public byte[] getSecCtx() { + return secCtx; + } + + /** {@inheritDoc} */ + @Override public short directType() { + return TYPE_CODE; + } + + /** {@inheritDoc} */ + @Override public byte fieldsCount() { + return 9; + } + + /** {@inheritDoc} */ + @Override public boolean writeTo(ByteBuffer buf, MessageWriter writer) { + writer.setBuffer(buf); + + if (!super.writeTo(buf, writer)) + return false; + + if (!writer.isHeaderWritten()) { + if (!writer.writeHeader(directType(), fieldsCount())) + return false; + + writer.onHeaderWritten(); + } + + switch (writer.state()) { + case 7: + if (!writer.writeByteArray("secCtx", secCtx)) + return false; + + writer.incrementState(); + + case 8: + if (!writer.writeUuid("secSubjId", secSubjId)) + return false; + + writer.incrementState(); + + } + + return true; + } + + /** {@inheritDoc} */ + @Override public boolean readFrom(ByteBuffer buf, MessageReader reader) { + reader.setBuffer(buf); + + if (!reader.beforeMessageRead()) + return false; + + if (!super.readFrom(buf, reader)) + return false; + + switch (reader.state()) { + case 7: + secCtx = reader.readByteArray("secCtx"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + case 8: + secSubjId = reader.readUuid("secSubjId"); + + if (!reader.isLastRead()) + return false; + + reader.incrementState(); + + } + + return reader.afterMessageRead(GridIoSecurityAwareMessage.class); + } +} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoUserMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoUserMessage.java index 332a9de511c79..408fad773f93e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoUserMessage.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/GridIoUserMessage.java @@ -358,4 +358,4 @@ public void deployment(GridDeployment dep) { @Override public String toString() { return S.toString(GridIoUserMessage.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/IgniteIoTestMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/IgniteIoTestMessage.java index 0a8b2b7a38e60..a6a2469736ef2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/IgniteIoTestMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/communication/IgniteIoTestMessage.java @@ -20,6 +20,7 @@ import java.nio.ByteBuffer; import java.util.UUID; import org.apache.ignite.internal.GridDirectTransient; +import org.apache.ignite.internal.IgniteCodeGeneratingFail; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.plugin.extensions.communication.Message; import org.apache.ignite.plugin.extensions.communication.MessageReader; @@ -28,6 +29,7 @@ /** * */ +@IgniteCodeGeneratingFail public class IgniteIoTestMessage implements Message { /** */ private static byte FLAG_PROC_FROM_NIO = 1; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java index c3efc592b1347..053cc6cad5566 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeployment.java @@ -30,21 +30,26 @@ import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicStampedReference; import 
org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.compute.ComputeTask; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.internal.processors.task.GridInternal; +import org.apache.ignite.internal.processors.task.GridVisorManagementTask; import org.apache.ignite.internal.util.GridLeanSet; import org.apache.ignite.internal.util.lang.GridMetadataAwareAdapter; import org.apache.ignite.internal.util.lang.GridPeerDeployAware; import org.apache.ignite.internal.util.lang.GridTuple; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteUuid; +import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import java.util.concurrent.ConcurrentHashMap; @@ -383,6 +388,20 @@ public boolean internalTask(@Nullable ComputeTask task, Class taskCls) { return res; } + /** + * Checks whether task class is annotated with {@link GridVisorManagementTask}. + * + * @param task Task. + * @param taskCls Task class. + * @return {@code True} if task is internal. + */ + @SuppressWarnings("unchecked") + public boolean visorManagementTask(@Nullable ComputeTask task, @NotNull Class taskCls) { + return annotation(task instanceof GridPeerDeployAware ? + ((GridPeerDeployAware)task).deployClass() : taskCls, + GridVisorManagementTask.class) != null; + } + /** * @param cls Class to create new instance of (using default constructor). * @return New instance. 
@@ -441,7 +460,7 @@ public Class existingDeployedClass(String clsName) { if (cls == null) { try { - cls = Class.forName(clsName, true, clsLdr); + cls = U.forName(clsName, clsLdr); Class cur = clss.putIfAbsent(clsName, cls); @@ -462,7 +481,7 @@ public Class existingDeployedClass(String clsName) { return cls; else if (!a.equals(clsName)) { try { - cls = Class.forName(a, true, clsLdr); + cls = U.forName(a, clsLdr); } catch (ClassNotFoundException ignored0) { continue; @@ -485,6 +504,10 @@ else if (!a.equals(clsName)) { } } } + catch (IgniteException e) { + if (!X.hasCause(e, TimeoutException.class)) + throw e; + } } return cls; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java index ca9ce328b6b5c..531d6c254f0d3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentClassLoader.java @@ -28,7 +28,9 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.DeploymentMode; @@ -37,6 +39,7 @@ import org.apache.ignite.internal.util.GridByteArrayList; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteUuid; @@ -445,6 +448,9 @@ private boolean 
isLocallyExcluded(String name) { // Catch Throwable to secure against any errors resulted from // corrupted class definitions or other user errors. catch (Exception e) { + if (X.hasCause(e, TimeoutException.class)) + throw e; + throw new ClassNotFoundException("Failed to load class due to unexpected error: " + name, e); } @@ -581,6 +587,8 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas IgniteCheckedException err = null; + TimeoutException te = null; + for (UUID nodeId : nodeListCp) { if (nodeId.equals(ctx.discovery().localNode().id())) // Skip local node as it is already used as parent class loader. @@ -598,7 +606,14 @@ private GridByteArrayList sendClassRequest(String name, String path) throws Clas } try { - GridDeploymentResponse res = comm.sendResourceRequest(path, ldrId, node, endTime); + GridDeploymentResponse res = null; + + try { + res = comm.sendResourceRequest(path, ldrId, node, endTime); + } + catch (TimeoutException e) { + te = e; + } if (res == null) { String msg = "Failed to send class-loading request to node (is node alive?) [node=" + @@ -657,12 +672,28 @@ else if (log.isDebugEnabled()) } } + if (te != null) { + err.addSuppressed(te); + + throw new IgniteException(err); + } + throw new ClassNotFoundException("Failed to peer load class [class=" + name + ", nodeClsLdrs=" + nodeLdrMapCp + ", parentClsLoader=" + getParent() + ']', err); } /** {@inheritDoc} */ @Nullable @Override public InputStream getResourceAsStream(String name) { + try { + return getResourceAsStreamEx(name); + } + catch (TimeoutException ignore) { + return null; + } + } + + /** */ + @Nullable public InputStream getResourceAsStreamEx(String name) throws TimeoutException { assert !Thread.holdsLock(mux); if (byteMap != null && name.endsWith(".class")) { @@ -702,7 +733,7 @@ else if (log.isDebugEnabled()) * @param name Resource name. * @return InputStream for resource or {@code null} if resource could not be found. 
*/ - @Nullable private InputStream sendResourceRequest(String name) { + @Nullable private InputStream sendResourceRequest(String name) throws TimeoutException { assert !Thread.holdsLock(mux); long endTime = computeEndTime(p2pTimeout); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java index 2a5f7cae1224b..973c51ecfe02f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentCommunication.java @@ -22,6 +22,7 @@ import java.util.Collection; import java.util.HashSet; import java.util.UUID; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cluster.ClusterNode; @@ -204,9 +205,9 @@ private void processResourceRequest(UUID nodeId, GridDeploymentRequest req) { // since it was already performed before (and was successful). if (!(ldr instanceof GridDeploymentClassLoader)) { // First check for @GridNotPeerDeployable annotation. - try { - String clsName = req.resourceName().replace('/', '.'); + String clsName = req.resourceName().replace('/', '.'); + try { int idx = clsName.indexOf(".class"); if (idx >= 0) @@ -228,8 +229,10 @@ private void processResourceRequest(UUID nodeId, GridDeploymentRequest req) { return; } } - catch (ClassNotFoundException ignore) { - // Safely ignore it here - resource wasn't a class name. + catch (LinkageError | ClassNotFoundException e) { + U.warn(log, "Failed to resolve class: " + clsName, e); + // Defined errors can be safely ignored here, because of resource which is able to be not a class name. + // Unsuccessful response will be sent below if the resource failed to be loaded. 
} } @@ -353,7 +356,7 @@ void sendUndeployRequest(String rsrcName, Collection rmtNodes) thro */ @SuppressWarnings({"SynchronizationOnLocalVariableOrMethodParameter"}) GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid clsLdrId, - final ClusterNode dstNode, long threshold) throws IgniteCheckedException { + final ClusterNode dstNode, long threshold) throws IgniteCheckedException, TimeoutException { assert rsrcName != null; assert dstNode != null; assert clsLdrId != null; @@ -470,13 +473,21 @@ GridDeploymentResponse sendResourceRequest(final String rsrcName, IgniteUuid cls timeout = threshold - U.currentTimeMillis(); } + + if (timeout <= 0) + throw new TimeoutException(); } catch (InterruptedException e) { // Interrupt again to get it in the users code. Thread.currentThread().interrupt(); - throw new IgniteCheckedException("Got interrupted while waiting for response from node: " + - dstNode.id(), e); + TimeoutException te = new TimeoutException( + "Got interrupted while waiting for response from node: " + dstNode.id() + ); + + te.initCause(e); + + throw te; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentInfoBean.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentInfoBean.java index 7f58ce36001ab..72f5ec6b30579 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentInfoBean.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentInfoBean.java @@ -277,4 +277,4 @@ public GridDeploymentInfoBean(GridDeploymentInfo dep) { @Override public String toString() { return S.toString(GridDeploymentInfoBean.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java 
b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java index b27cc4bd0275f..1d36571e7b1bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentLocalStore.java @@ -188,7 +188,7 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { // Check that class can be loaded. String clsName = meta.className(); - Class cls = Class.forName(clsName != null ? clsName : alias, true, ldr); + Class cls = U.forName(clsName != null ? clsName : alias, ldr); spi.register(ldr, cls); @@ -227,6 +227,11 @@ class GridDeploymentLocalStore extends GridDeploymentStoreAdapter { return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return deployment(meta.alias()); + } + /** * @param alias Class alias. * @return Deployment. @@ -446,7 +451,7 @@ private void recordDeployFailed(Class cls, ClassLoader clsLdr, boolean record evt.message(msg); evt.node(ctx.discovery().localNode()); - evt.type(isTask(cls) ? EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); + evt.type(isTask ? 
EVT_CLASS_DEPLOY_FAILED : EVT_TASK_DEPLOY_FAILED); evt.alias(taskName); ctx.event().record(evt); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java index cea178604be50..04cfd60610b42 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentManager.java @@ -458,6 +458,11 @@ else if (locDep != null) { } } + GridDeployment dep = verStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + if (reuse) { GridDeployment locDep = locStore.getDeployment(meta); @@ -470,7 +475,7 @@ else if (locDep != null) { "in some other mode). Either change IgniteConfiguration.getDeploymentMode() property to " + "SHARED or CONTINUOUS or remove class from local classpath and any of " + "the local GAR deployments that may have it [cls=" + meta.className() + ", depMode=" + - locDep.deployMode() + ']', "Failed to deploy class in SHARED or CONTINUOUS mode."); + locDep.deployMode() + ']'); return null; } @@ -478,8 +483,7 @@ else if (locDep != null) { if (!locDep.userVersion().equals(meta.userVersion())) { U.warn(log, "Failed to deploy class in SHARED or CONTINUOUS mode for given user version " + "(class is locally deployed for a different user version) [cls=" + meta.className() + - ", localVer=" + locDep.userVersion() + ", otherVer=" + meta.userVersion() + ']', - "Failed to deploy class in SHARED or CONTINUOUS mode."); + ", localVer=" + locDep.userVersion() + ", otherVer=" + meta.userVersion() + ']'); return null; } @@ -497,7 +501,12 @@ else if (locDep != null) { // Private or Isolated mode. 
meta.record(false); - GridDeployment dep = locStore.getDeployment(meta); + GridDeployment dep = ldrStore.searchDeploymentCache(meta); + + if (dep != null) + return dep; + + dep = locStore.getDeployment(meta); if (sndNodeId.equals(ctx.localNodeId())) { if (dep == null) @@ -664,4 +673,4 @@ private LocalDeployment(DeploymentMode depMode, ClassLoader clsLdr, IgniteUuid c return S.toString(LocalDeployment.class, this, super.toString()); } } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java index 4ba308c9ef6ef..0477523949fce 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerLoaderStore.java @@ -219,7 +219,7 @@ public class GridDeploymentPerLoaderStore extends GridDeploymentStoreAdapter { IsolatedDeployment dep; synchronized (mux) { - dep = cache.get(meta.classLoaderId()); + dep = (IsolatedDeployment)searchDeploymentCache(meta); if (dep == null) { long undeployTimeout = 0; @@ -331,6 +331,11 @@ else if (d.sequenceNumber() > meta.sequenceNumber()) { return dep; } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + return cache.get(meta.classLoaderId()); + } + /** {@inheritDoc} */ @Override public void addParticipants(Map allParticipants, Map addedParticipants) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java index 14aa49f60a05b..75160f089bd66 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentPerVersionStore.java @@ -29,6 +29,7 @@ import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.TimeoutException; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.configuration.DeploymentMode; import org.apache.ignite.events.DeploymentEvent; @@ -277,6 +278,26 @@ else if (log.isDebugEnabled()) } } + /** {@inheritDoc} */ + @Override public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta) { + List deps = null; + + synchronized (mux) { + deps = cache.get(meta.userVersion()); + } + + if (deps != null) { + assert !deps.isEmpty(); + + for (SharedDeployment d : deps) { + if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId())) + return d; + } + } + + return null; + } + /** {@inheritDoc} */ @Override @Nullable public GridDeployment getDeployment(GridDeploymentMetadata meta) { assert meta != null; @@ -356,22 +377,14 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { return null; } - List deps = cache.get(meta.userVersion()); + dep = (SharedDeployment)searchDeploymentCache(meta); - if (deps != null) { - assert !deps.isEmpty(); + if (dep == null) { + List deps = cache.get(meta.userVersion()); - for (SharedDeployment d : deps) { - if (d.hasParticipant(meta.senderNodeId(), meta.classLoaderId()) || - meta.senderNodeId().equals(ctx.localNodeId())) { - // Done. - dep = d; + if (deps != null) { + assert !deps.isEmpty(); - break; - } - } - - if (dep == null) { checkRedeploy(meta); // Find existing deployments that need to be checked @@ -413,12 +426,12 @@ else if (ctx.discovery().node(meta.senderNodeId()) == null) { deps.add(dep); } } - } - else { - checkRedeploy(meta); + else { + checkRedeploy(meta); - // Create peer class loader. 
- dep = createNewDeployment(meta, true); + // Create peer class loader. + dep = createNewDeployment(meta, true); + } } } @@ -689,7 +702,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; // Temporary class loader. - ClassLoader temp = new GridDeploymentClassLoader( + GridDeploymentClassLoader temp = new GridDeploymentClassLoader( IgniteUuid.fromUuid(ctx.localNodeId()), meta.userVersion(), meta.deploymentMode(), @@ -712,7 +725,14 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta InputStream rsrcIn = null; try { - rsrcIn = temp.getResourceAsStream(path); + boolean timeout = false; + + try { + rsrcIn = temp.getResourceAsStreamEx(path); + } + catch (TimeoutException e) { + timeout = true; + } boolean found = rsrcIn != null; @@ -732,7 +752,7 @@ private boolean checkLoadRemoteClass(String clsName, GridDeploymentMetadata meta return false; } - else + else if (!timeout) // Cache result if classloader is still alive. ldrRsrcCache.put(clsName, found); } @@ -1190,8 +1210,6 @@ boolean hasParticipant(UUID nodeId, IgniteUuid ldrId) { assert nodeId != null; assert ldrId != null; - assert Thread.holdsLock(mux); - return classLoader().hasRegisteredNode(nodeId, ldrId); } @@ -1290,6 +1308,12 @@ void recordUndeployed(@Nullable UUID leftNodeId) { ctx.cache().onUndeployed(ldr); + // Clear static class cache. + U.clearClassFromClassCache(ctx.cache().context().deploy().globalLoader(), sampleClassName()); + + for (String alias : deployedClassMap().keySet()) + U.clearClassFromClassCache(ctx.cache().context().deploy().globalLoader(), alias); + // Clear optimized marshaller's cache. 
if (ctx.config().getMarshaller() instanceof AbstractMarshaller) ((AbstractMarshaller)ctx.config().getMarshaller()).onUndeploy(ldr); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentRequest.java index 729cf4c54278e..708c64860579e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentRequest.java @@ -278,4 +278,4 @@ public void nodeIds(Collection nodeIds) { @Override public String toString() { return S.toString(GridDeploymentRequest.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentResponse.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentResponse.java index d1b0384f0fb6a..591957d37bd2f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentResponse.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentResponse.java @@ -197,4 +197,4 @@ void errorMessage(String errMsg) { @Override public String toString() { return S.toString(GridDeploymentResponse.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java index 07e1e22750a5f..d529eaf47a1b9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/deployment/GridDeploymentStore.java @@ -58,6 +58,12 @@ public interface GridDeploymentStore { */ @Nullable public 
GridDeployment getDeployment(GridDeploymentMetadata meta); + /** + * @param meta Deployment meatdata. + * @return Grid deployment instance if it was finded in cache, {@code null} otherwise. + */ + @Nullable public GridDeployment searchDeploymentCache(GridDeploymentMetadata meta); + /** * Gets class loader based on ID. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ClusterMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ClusterMetricsImpl.java index 4138e64199da1..3b4330f38b71c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ClusterMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ClusterMetricsImpl.java @@ -18,9 +18,12 @@ package org.apache.ignite.internal.managers.discovery; import java.util.Collection; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.processors.cache.GridCacheAdapter; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.jobmetrics.GridJobMetrics; import org.apache.ignite.internal.util.typedef.internal.U; @@ -351,6 +354,14 @@ public ClusterMetricsImpl(GridKernalContext ctx, GridLocalMetrics vmMetrics, lon return 1; } + /** {@inheritDoc} */ + @Override public long getCurrentPmeDuration() { + GridDhtPartitionsExchangeFuture future = ctx.cache().context().exchange().lastTopologyFuture(); + + return (future == null || future.isDone()) ? 
+ 0 : TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - future.getInitTime()); + } + /** * Job metrics */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ConsistentIdMapper.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ConsistentIdMapper.java index 59f773dd3c39b..ac72afeeb185d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ConsistentIdMapper.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/ConsistentIdMapper.java @@ -99,8 +99,6 @@ public Map> mapToCompactIds( if (txNodes == null) return null; - Map constIdMap = baselineTop.consistentIdMapping(); - Map m = discoveryMgr.consistentId(topVer); int bltNodes = m.size(); @@ -112,15 +110,19 @@ public Map> mapToCompactIds( for (Map.Entry> e : txNodes.entrySet()) { UUID node = e.getKey(); + if (!m.containsKey(node)) // not in blt + continue; + Collection backupNodes = e.getValue(); Collection backups = new ArrayList<>(backupNodes.size()); for (UUID backup : backupNodes) { - if (m.containsKey(backup)) + if (m.containsKey(backup)) { nodeCnt++; - backups.add(mapToCompactId(topVer, backup)); + backups.add(mapToCompactId(topVer, backup)); + } } // Optimization for short store full nodes set. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/CustomMessageWrapper.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/CustomMessageWrapper.java index 426888631729d..c7feba3cc22bd 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/CustomMessageWrapper.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/CustomMessageWrapper.java @@ -33,7 +33,7 @@ public class CustomMessageWrapper implements DiscoverySpiCustomMessage { /** * @param delegate Delegate. 
*/ - CustomMessageWrapper(DiscoveryCustomMessage delegate) { + public CustomMessageWrapper(DiscoveryCustomMessage delegate) { this.delegate = delegate; } @@ -49,6 +49,11 @@ public class CustomMessageWrapper implements DiscoverySpiCustomMessage { return delegate.isMutable(); } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return delegate.stopProcess(); + } + /** * @return Delegate. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java index c21698f4b71ed..8cdcbf367330d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoCache.java @@ -164,10 +164,8 @@ public class DiscoCache { this.consIdxToNodeId = consIdxToNodeId; aliveBaselineNodePred = new P1() { - @Override - public boolean apply(BaselineNode node) { + @Override public boolean apply(BaselineNode node) { return node instanceof ClusterNode && alives.contains(((ClusterNode)node).id()); - } }; @@ -311,6 +309,24 @@ public boolean baselineNode(ClusterNode node) { return null; } + /** + * @return Oldest server node. + */ + @Nullable public ClusterNode oldestServerNode(){ + if (srvNodes.size() > 0) + return srvNodes.get(0); + + return null; + } + + /** + * @param nodeId Node ID. + * @return {@code True} if node is in alives list. + */ + public boolean alive(UUID nodeId) { + return alives.contains(nodeId); + } + /** * Gets all nodes that have cache with given name. * @@ -407,6 +423,26 @@ else if (cmp > 0) return -(low + 1); } + /** + * + * Returns {@code True} if all nodes has the given attribute and its value equals to {@code expVal}. + * + * @param Attribute Type. + * @param name Attribute name. + * @param expVal Expected value. 
+ * @return {@code True} if all the given nodes has the given attribute and its value equals to {@code expVal}. + */ + public boolean checkAttribute(String name, T expVal) { + for (ClusterNode node : allNodes) { + T attr = node.attribute(name); + + if (attr == null || !expVal.equals(attr)) + return false; + } + + return true; + } + /** * @param nodes Cluster nodes. * @return Empty collection if nodes list is {@code null} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java index c708c6247b2c2..6ed2096faf386 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryCustomMessage.java @@ -20,6 +20,7 @@ import java.io.Serializable; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.lang.IgniteUuid; +import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryNodeAddFinishedMessage; import org.apache.ignite.spi.discovery.tcp.messages.TcpDiscoveryNodeAddedMessage; import org.jetbrains.annotations.Nullable; @@ -87,10 +88,17 @@ public interface DiscoveryCustomMessage extends Serializable { @Nullable public DiscoveryCustomMessage ackMessage(); /** - * @return {@code true} if message can be modified during listener notification. Changes will be send to next nodes. + * @return {@code True} if message can be modified during listener notification. Changes will be sent to next nodes. */ public boolean isMutable(); + /** + * See {@link DiscoverySpiCustomMessage#stopProcess()}. + * + * @return {@code True} if message should not be sent to others nodes after it was processed on coordinator. 
+ */ + public boolean stopProcess(); + /** * Creates new discovery cache if message caused topology version change. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryMessageResultsCollector.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryMessageResultsCollector.java new file mode 100644 index 0000000000000..43be952ea9137 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/DiscoveryMessageResultsCollector.java @@ -0,0 +1,222 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.managers.discovery; + +import java.util.HashMap; +import java.util.Map; +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.GridKernalContext; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.jetbrains.annotations.Nullable; + +/** + * + */ +public abstract class DiscoveryMessageResultsCollector { + /** */ + private final Map> rcvd = new HashMap<>(); + + /** */ + private int leftMsgs; + + /** */ + protected DiscoCache discoCache; + + /** */ + protected final GridKernalContext ctx; + + /** + * @param ctx Context. + */ + protected DiscoveryMessageResultsCollector(GridKernalContext ctx) { + this.ctx = ctx; + } + + /** + * @param rcvd Received messages. + * @return Result. + */ + protected abstract R createResult(Map> rcvd); + + /** + * @param r Result. + */ + protected abstract void onResultsCollected(R r); + + /** + * @param discoCache Discovery state when discovery message was received. + * @param node Node. + * @return {@code True} if need wait for result from given node. + */ + protected abstract boolean waitForNode(DiscoCache discoCache, ClusterNode node); + + /** + * @param discoCache Discovery state. + */ + public final void init(DiscoCache discoCache) { + assert discoCache != null; + + R res = null; + + synchronized (this) { + assert this.discoCache == null; + assert leftMsgs == 0 : leftMsgs; + + this.discoCache = discoCache; + + for (ClusterNode node : discoCache.allNodes()) { + if (ctx.discovery().alive(node) && waitForNode(discoCache, node) && !rcvd.containsKey(node.id())) { + rcvd.put(node.id(), new NodeMessage<>((M)null)); + + leftMsgs++; + } + } + + if (leftMsgs == 0) + res = createResult(rcvd); + } + + if (res != null) + onResultsCollected(res); + } + + /** + * @param nodeId Node ID. + * @param msg Message. 
+ */ + public final void onMessage(UUID nodeId, M msg) { + R res = null; + + synchronized (this) { + if (allReceived()) + return; + + NodeMessage expMsg = rcvd.get(nodeId); + + if (expMsg == null) + rcvd.put(nodeId, new NodeMessage<>(msg)); + else if (expMsg.set(msg)) { + assert leftMsgs > 0; + + leftMsgs--; + + if (allReceived()) + res = createResult(rcvd); + } + } + + if (res != null) + onResultsCollected(res); + } + + /** + * @param nodeId Failed node ID. + */ + public final void onNodeFail(UUID nodeId) { + R res = null; + + synchronized (this) { + if (allReceived()) + return; + + NodeMessage expMsg = rcvd.get(nodeId); + + if (expMsg != null && expMsg.onNodeFailed()) { + assert leftMsgs > 0 : leftMsgs; + + leftMsgs--; + + if (allReceived()) + res = createResult(rcvd); + } + } + + if (res != null) + onResultsCollected(res); + } + + /** + * @return {@code True} if expected messages are initialized and all message are received. + */ + private boolean allReceived() { + return discoCache != null && leftMsgs == 0; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(DiscoveryMessageResultsCollector.class, this); + } + + /** + * + */ + protected static class NodeMessage { + /** */ + boolean nodeFailed; + + /** */ + M msg; + + /** + * @param msg Message. + */ + NodeMessage(M msg) { + this.msg = msg; + } + + /** + * @return Message or {@code null} if node failed. + */ + @Nullable public M message() { + return msg; + } + + /** + * @return {@code True} if node result was not set before. + */ + boolean onNodeFailed() { + if (nodeFailed || msg != null) + return false; + + nodeFailed = true; + + return true; + } + + /** + * @param msg Received message. + * @return {@code True} if node result was not set before. 
+ */ + boolean set(M msg) { + assert msg != null; + + if (this.msg != null) + return false; + + this.msg = msg; + + return !nodeFailed; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(NodeMessage.class, this); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java index 2e814d45aeeae..0fed03ce86542 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/GridDiscoveryManager.java @@ -48,16 +48,21 @@ import org.apache.ignite.IgniteClientDisconnectedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteInterruptedException; +import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.CacheMetrics; import org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.BaselineNode; import org.apache.ignite.cluster.ClusterMetrics; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.CommunicationFailureResolver; import org.apache.ignite.configuration.DataRegionConfiguration; import org.apache.ignite.configuration.DataStorageConfiguration; +import org.apache.ignite.configuration.DefaultCommunicationFailureResolver; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.events.Event; +import org.apache.ignite.events.EventType; import org.apache.ignite.failure.FailureContext; import org.apache.ignite.failure.FailureType; import org.apache.ignite.failure.RestartProcessFailureHandler; @@ -66,7 +71,9 @@ import org.apache.ignite.internal.GridKernalContext; import 
org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.IgniteKernal; +import org.apache.ignite.internal.NodeStoppingException; import org.apache.ignite.internal.cluster.NodeOrderComparator; import org.apache.ignite.internal.events.DiscoveryCustomEvent; import org.apache.ignite.internal.managers.GridManagerAdapter; @@ -92,11 +99,13 @@ import org.apache.ignite.internal.util.GridSpinBusyLock; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; +import org.apache.ignite.internal.util.future.IgniteFutureImpl; import org.apache.ignite.internal.util.lang.GridTuple6; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.CI1; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.P1; +import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; import org.apache.ignite.internal.util.typedef.internal.S; @@ -112,6 +121,8 @@ import org.apache.ignite.plugin.security.SecurityCredentials; import org.apache.ignite.plugin.segmentation.SegmentationPolicy; import org.apache.ignite.spi.IgniteSpiException; +import org.apache.ignite.spi.communication.CommunicationSpi; +import org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi; import org.apache.ignite.spi.discovery.DiscoveryDataBag; import org.apache.ignite.spi.discovery.DiscoveryDataBag.JoiningNodeDiscoveryData; import org.apache.ignite.spi.discovery.DiscoveryMetricsProvider; @@ -120,11 +131,14 @@ import org.apache.ignite.spi.discovery.DiscoverySpiDataExchange; import org.apache.ignite.spi.discovery.DiscoverySpiHistorySupport; import 
org.apache.ignite.spi.discovery.DiscoverySpiListener; +import org.apache.ignite.spi.discovery.DiscoverySpiMutableCustomMessageSupport; import org.apache.ignite.spi.discovery.DiscoverySpiNodeAuthenticator; import org.apache.ignite.spi.discovery.DiscoverySpiOrderSupport; +import org.apache.ignite.spi.discovery.IgniteDiscoveryThread; import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; import org.apache.ignite.thread.IgniteThread; +import org.apache.ignite.thread.OomExceptionHandler; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; @@ -142,6 +156,8 @@ import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED; import static org.apache.ignite.events.EventType.EVT_NODE_SEGMENTED; +import static org.apache.ignite.failure.FailureType.CRITICAL_ERROR; +import static org.apache.ignite.failure.FailureType.SYSTEM_WORKER_TERMINATION; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DATA_REGIONS_OFFHEAP_SIZE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_DEPLOYMENT_MODE; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_LATE_AFFINITY_ASSIGNMENT; @@ -198,7 +214,7 @@ public class GridDiscoveryManager extends GridManagerAdapter { /** Predicate filtering client nodes. */ private static final IgnitePredicate FILTER_CLI = new P1() { @Override public boolean apply(ClusterNode n) { - return CU.clientNode(n); + return n.isClient(); } }; @@ -208,6 +224,9 @@ public class GridDiscoveryManager extends GridManagerAdapter { /** Discovery event worker. */ private final DiscoveryWorker discoWrk = new DiscoveryWorker(); + /** Discovery event notyfier worker. */ + private final DiscoveryMessageNotifierWorker discoNtfWrk = new DiscoveryMessageNotifierWorker(); + /** Network segment check worker. 
*/ private SegmentCheckWorker segChkWrk; @@ -359,7 +378,7 @@ public void cleanCachesAndGroups() { */ public void addCacheGroup(CacheGroupDescriptor grpDesc, IgnitePredicate filter, CacheMode cacheMode) { CacheGroupAffinity old = registeredCacheGrps.put(grpDesc.groupId(), - new CacheGroupAffinity(grpDesc.cacheOrGroupName(), filter, cacheMode)); + new CacheGroupAffinity(grpDesc.cacheOrGroupName(), filter, cacheMode, grpDesc.persistenceEnabled())); assert old == null : old; } @@ -478,7 +497,7 @@ private void updateClientNodes(UUID leftNodeId) { /** {@inheritDoc} */ @Override protected void onKernalStart0() throws IgniteCheckedException { - if (Boolean.TRUE.equals(ctx.config().isClientMode()) && !getSpi().isClientMode()) + if ((getSpi() instanceof TcpDiscoverySpi) && Boolean.TRUE.equals(ctx.config().isClientMode()) && !getSpi().isClientMode()) ctx.performance().add("Enable client mode for TcpDiscoverySpi " + "(set TcpDiscoverySpi.forceServerMode to false)"); } @@ -551,6 +570,9 @@ private void updateClientNodes(UUID leftNodeId) { }); } + if (ctx.config().getCommunicationFailureResolver() != null) + ctx.resource().injectGeneric(ctx.config().getCommunicationFailureResolver()); + spi.setListener(new DiscoverySpiListener() { private long gridStartTime; @@ -559,24 +581,31 @@ private void updateClientNodes(UUID leftNodeId) { for (IgniteInClosure lsnr : locNodeInitLsnrs) lsnr.apply(locNode); - if (locNode instanceof TcpDiscoveryNode) { - final TcpDiscoveryNode node = (TcpDiscoveryNode)locNode; + if (locNode instanceof IgniteClusterNode) { + final IgniteClusterNode node = (IgniteClusterNode)locNode; if (consistentId != null) node.setConsistentId(consistentId); } } - @Override public void onDiscovery( + @Override public IgniteFuture onDiscovery( final int type, final long topVer, final ClusterNode node, final Collection topSnapshot, final Map> snapshots, - @Nullable DiscoverySpiCustomMessage spiCustomMsg) { - synchronized (discoEvtMux) { - onDiscovery0(type, topVer, node, 
topSnapshot, snapshots, spiCustomMsg); - } + @Nullable DiscoverySpiCustomMessage spiCustomMsg + ) { + GridFutureAdapter notificationFut = new GridFutureAdapter<>(); + + discoNtfWrk.submit(notificationFut, () -> { + synchronized (discoEvtMux) { + onDiscovery0(type, topVer, node, topSnapshot, snapshots, spiCustomMsg); + } + }); + + return new IgniteFutureImpl<>(notificationFut); } /** @@ -676,11 +705,15 @@ else if (customMsg instanceof ChangeGlobalStateFinishMessage) { } nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer); + + if (incMinorTopVer) + ctx.cache().onDiscoveryEvent(type, customMsg, node, nextTopVer, ctx.state().clusterState()); } - else + else { nextTopVer = new AffinityTopologyVersion(topVer, minorTopVer); - ctx.cache().onDiscoveryEvent(type, customMsg, node, nextTopVer, ctx.state().clusterState()); + ctx.cache().onDiscoveryEvent(type, customMsg, node, nextTopVer, ctx.state().clusterState()); + } if (type == EVT_DISCOVERY_CUSTOM_EVT) { for (Class cls = customMsg.getClass(); cls != null; cls = cls.getSuperclass()) { @@ -736,8 +769,6 @@ else if (customMsg instanceof ChangeGlobalStateMessage) { // Current version. discoCache = discoCache(); - final DiscoCache discoCache0 = discoCache; - // If this is a local join event, just save it and do not notify listeners. if (locJoinEvt) { if (gridStartTime == 0) @@ -783,6 +814,9 @@ else if (type == EVT_CLIENT_NODE_DISCONNECTED) { ((IgniteKernal)ctx.grid()).onDisconnected(); + if (!locJoin.isDone()) + locJoin.onDone(new IgniteCheckedException("Node disconnected")); + locJoin = new GridFutureAdapter<>(); registeredCaches.clear(); @@ -818,7 +852,7 @@ else if (type == EVT_CLIENT_NODE_RECONNECTED) { try { fut.get(); - discoWrk.addEvent(type, nextTopVer, node, discoCache0, topSnapshot, null); + discoWrk.addEvent(EVT_CLIENT_NODE_RECONNECTED, nextTopVer, node, discoCache, topSnapshot, null); } catch (IgniteException ignore) { // No-op. 
@@ -894,6 +928,8 @@ else if (type == EVT_CLIENT_NODE_RECONNECTED) { } }); + new IgniteThread(discoNtfWrk).start(); + startSpi(); registeredDiscoSpi = true; @@ -911,6 +947,8 @@ else if (type == EVT_CLIENT_NODE_RECONNECTED) { segChkThread = new IgniteThread(segChkWrk); + segChkThread.setUncaughtExceptionHandler(new OomExceptionHandler(ctx)); + segChkThread.start(); } @@ -1052,8 +1090,12 @@ private GridLocalMetrics createMetrics() { /** * @return Metrics provider. */ - private DiscoveryMetricsProvider createMetricsProvider() { + public DiscoveryMetricsProvider createMetricsProvider() { return new DiscoveryMetricsProvider() { + /** Disable cache metrics update. */ + private final boolean disableCacheMetricsUpdate = IgniteSystemProperties.getBoolean( + IgniteSystemProperties.IGNITE_DISCOVERY_DISABLE_CACHE_METRICS_UPDATE, false); + /** */ private final long startTime = U.currentTimeMillis(); @@ -1065,6 +1107,13 @@ private DiscoveryMetricsProvider createMetricsProvider() { /** {@inheritDoc} */ @Override public Map cacheMetrics() { try { + if (disableCacheMetricsUpdate) + return Collections.emptyMap(); + + /** Caches should not be accessed while state transition is in progress. 
*/ + if (ctx.state().clusterState().transition()) + return Collections.emptyMap(); + Collection> caches = ctx.cache().internalCaches(); if (!F.isEmpty(caches)) { @@ -1073,10 +1122,8 @@ private DiscoveryMetricsProvider createMetricsProvider() { for (GridCacheAdapter cache : caches) { if (cache.context().statisticsEnabled() && cache.context().started() && - cache.context().affinity().affinityTopologyVersion().topologyVersion() > 0) { - + cache.context().affinity().affinityTopologyVersion().topologyVersion() > 0) metrics.put(cache.context().cacheId(), cache.localMetrics()); - } } return metrics; @@ -1198,8 +1245,7 @@ private void checkAttributes(Iterable nodes) throws IgniteCheckedEx "(all nodes in topology should have identical value) " + "[locPreferIpV4=" + locPreferIpV4 + ", rmtPreferIpV4=" + rmtPreferIpV4 + ", locId8=" + U.id8(locNode.id()) + ", rmtId8=" + U.id8(n.id()) + - ", rmtAddrs=" + U.addressesAsString(n) + ", rmtNode=" + U.toShortString(n) + "]", - "Local and remote 'java.net.preferIPv4Stack' system properties do not match."); + ", rmtAddrs=" + U.addressesAsString(n) + ", rmtNode=" + U.toShortString(n) + "]"); ipV4Warned = true; } @@ -1356,18 +1402,22 @@ private static int cpus(Collection nodes) { * Prints the latest topology info into log taking into account logging/verbosity settings. * * @param topVer Topology version. + * @param evtType Event type. + * @param evtNode Event node. */ - public void ackTopology(long topVer) { - ackTopology(topVer, false); + public void ackTopology(long topVer, int evtType, ClusterNode evtNode) { + ackTopology(topVer, evtType, evtNode, false); } /** * Logs grid size for license compliance. * * @param topVer Topology version. + * @param evtType Event type. + * @param evtNode Event node. * @param throttle Suppress printing if this topology was already printed. 
*/ - private void ackTopology(long topVer, boolean throttle) { + private void ackTopology(long topVer, int evtType, ClusterNode evtNode, boolean throttle) { assert !isLocDaemon; DiscoCache discoCache = discoCacheHist.get(new AffinityTopologyVersion(topVer)); @@ -1414,7 +1464,7 @@ else if (log.isInfoEnabled()) return null; } - }, topVer, srvNodes.size(), clientNodes.size(), totalCpus, heap, offheap); + }, topVer, discoCache, evtType, evtNode, srvNodes.size(), clientNodes.size(), totalCpus, heap, offheap); if (log.isDebugEnabled()) { String dbg = ""; @@ -1465,7 +1515,7 @@ else if (log.isInfoEnabled()) return null; } - }, topVer, srvNodes.size(), clientNodes.size(), totalCpus, heap, offheap); + }, topVer, discoCache, evtType, evtNode, srvNodes.size(), clientNodes.size(), totalCpus, heap, offheap); } /** @@ -1541,14 +1591,18 @@ private String dataRegionConfigurationMessage(DataRegionConfiguration regCfg) { /** * @param clo Wrapper of logger. * @param topVer Topology version. + * @param discoCache Discovery cache. + * @param evtType Event type. + * @param evtNode Event node. * @param srvNodesNum Server nodes number. * @param clientNodesNum Client nodes number. * @param totalCpus Total cpu number. * @param heap Heap size. * @param offheap Offheap size. */ - private void topologySnapshotMessage(IgniteClosure clo, long topVer, int srvNodesNum, - int clientNodesNum, int totalCpus, double heap, double offheap) { + private void topologySnapshotMessage(IgniteClosure clo, long topVer, DiscoCache discoCache, + int evtType, ClusterNode evtNode, int srvNodesNum, int clientNodesNum, int totalCpus, double heap, + double offheap) { String summary = PREFIX + " [" + (discoOrdered ? 
"ver=" + topVer + ", " : "") + "servers=" + srvNodesNum + @@ -1559,6 +1613,47 @@ private void topologySnapshotMessage(IgniteClosure clo, long topVe clo.apply(summary); + ClusterNode currCrd = discoCache.oldestServerNode(); + + if ((evtType == EventType.EVT_NODE_FAILED || evtType == EventType.EVT_NODE_LEFT) && + currCrd != null && currCrd.order() > evtNode.order()) + clo.apply("Coordinator changed [prev=" + evtNode + ", cur=" + currCrd + "]"); + + DiscoveryDataClusterState state = discoCache.state(); + + clo.apply(" ^-- Node [id=" + discoCache.localNode().id().toString().toUpperCase() + ", clusterState=" + + (state.active() ? "ACTIVE" : "INACTIVE") + ']'); + + BaselineTopology blt = state.baselineTopology(); + + if (blt != null && discoCache.baselineNodes() != null) { + int bltSize = discoCache.baselineNodes().size(); + int bltOnline = discoCache.aliveBaselineNodes().size(); + int bltOffline = bltSize - bltOnline; + + clo.apply(" ^-- Baseline [id=" + blt.id() + ", size=" + bltSize + ", online=" + bltOnline + + ", offline=" + bltOffline + ']'); + + if (!state.active() && ctx.config().isAutoActivationEnabled()) { + String offlineConsistentIds = ""; + + if (bltOffline > 0 && bltOffline <= 5) { + Collection offlineNodes = new HashSet<>(discoCache.baselineNodes()); + + offlineNodes.removeAll(discoCache.aliveBaselineNodes()); + + offlineConsistentIds = ' ' + F.nodeConsistentIds(offlineNodes).toString(); + } + + if (bltOffline == 0) { + if (evtType == EVT_NODE_JOINED && discoCache.baselineNode(evtNode)) + clo.apply(" ^-- All baseline nodes are online, will start auto-activation"); + } + else + clo.apply(" ^-- " + bltOffline + " nodes left for auto-activation" + offlineConsistentIds); + } + } + DataStorageConfiguration memCfg = ctx.config().getDataStorageConfiguration(); if (memCfg == null) @@ -1591,7 +1686,7 @@ private void topologySnapshotMessage(IgniteClosure clo, long topVe } if (!locJoin.isDone()) - locJoin.onDone(new IgniteCheckedException("Failed to wait for local 
node joined event (grid is stopping).")); + locJoin.onDone(new NodeStoppingException("Failed to wait for local node joined event (grid is stopping).")); } /** {@inheritDoc} */ @@ -1608,6 +1703,10 @@ private void topologySnapshotMessage(IgniteClosure clo, long topVe U.join(discoWrk, log); + U.cancel(discoNtfWrk); + + U.join(discoNtfWrk, log); + // Stop SPI itself. stopSpi(); @@ -1679,13 +1778,15 @@ public boolean pingNode(UUID nodeId) throws IgniteClientDisconnectedCheckedExcep return getSpi().pingNode(nodeId); } catch (IgniteException e) { - if (e.hasCause(IgniteClientDisconnectedCheckedException.class)) { + if (e.hasCause(IgniteClientDisconnectedCheckedException.class, IgniteClientDisconnectedException.class)) { IgniteFuture reconnectFut = ctx.cluster().clientReconnectFuture(); throw new IgniteClientDisconnectedCheckedException(reconnectFut, e.getMessage()); } - throw e; + LT.warn(log, "Ping failed with error [node=" + nodeId + ", err=" + e + ']'); + + return true; } finally { busyLock.leaveBusy(); @@ -1999,6 +2100,21 @@ private DiscoCache resolveDiscoCache(int grpId, AffinityTopologyVersion topVer) snap.discoCache : discoCacheHist.get(topVer); if (cache == null) { + AffinityTopologyVersion lastAffChangedTopVer = + ctx.cache().context().exchange().lastAffinityChangedTopologyVersion(topVer); + + if (!lastAffChangedTopVer.equals(topVer)) { + assert lastAffChangedTopVer.compareTo(topVer) < 0; + + for (Map.Entry e : discoCacheHist.descendingEntrySet()) { + if (e.getKey().isBetween(lastAffChangedTopVer, topVer)) + return e.getValue(); + + if (e.getKey().compareTo(lastAffChangedTopVer) < 0) + break; + } + } + CacheGroupDescriptor desc = ctx.cache().cacheGroupDescriptors().get(grpId); throw new IgniteException("Failed to resolve nodes topology [" + @@ -2025,7 +2141,16 @@ private DiscoCache resolveDiscoCache(int grpId, AffinityTopologyVersion topVer) Map> snapshots = topHist; - return snapshots.get(topVer); + Collection nodes = snapshots.get(topVer); + + if (nodes == 
null) { + DiscoCache cache = discoCacheHist.get(new AffinityTopologyVersion(topVer, 0)); + + if (cache != null) + nodes = cache.allNodes(); + } + + return nodes; } /** @@ -2066,7 +2191,7 @@ public Serializable consistentId() { * * @return Wrapped DiscoverySpi SPI. */ - private DiscoverySpi getInjectedDiscoverySpi() { + public DiscoverySpi getInjectedDiscoverySpi() { try { inject(); } @@ -2120,6 +2245,13 @@ public DiscoveryLocalJoinData localJoin() { } } + /** + * @return Local join future. + */ + public GridFutureAdapter localJoinFuture() { + return locJoin; + } + /** * @param msg Custom message. * @throws IgniteCheckedException If failed. @@ -2157,6 +2289,19 @@ public void clientCacheStartEvent(UUID reqId, } } + /** + * @param discoCache + * @param node + */ + public void metricsUpdateEvent(DiscoCache discoCache, ClusterNode node) { + discoWrk.addEvent(EVT_NODE_METRICS_UPDATED, + discoCache.version(), + node, + discoCache, + discoCache.nodeMap.values(), + null); + } + /** * Gets first grid node start time, see {@link DiscoverySpi#getGridStartTime()}. * @@ -2211,8 +2356,13 @@ public void failNode(UUID nodeId, @Nullable String warning) { public boolean reconnectSupported() { DiscoverySpi spi = getSpi(); - return ctx.discovery().localNode().isClient() && (spi instanceof TcpDiscoverySpi) && - !(((TcpDiscoverySpi) spi).isClientReconnectDisabled()); + ClusterNode clusterNode = ctx.discovery().localNode(); + + boolean client = (clusterNode instanceof TcpDiscoveryNode) ? 
+ (((TcpDiscoveryNode) clusterNode).clientRouterNodeId() != null) : clusterNode.isClient(); + + return client && (spi instanceof IgniteDiscoverySpi) && + ((IgniteDiscoverySpi)spi).clientReconnectSupported(); } /** @@ -2225,7 +2375,7 @@ public void reconnect() { DiscoverySpi discoverySpi = getSpi(); - ((TcpDiscoverySpi)discoverySpi).reconnect(); + ((IgniteDiscoverySpi)discoverySpi).clientReconnect(); } /** @@ -2271,7 +2421,7 @@ public void reconnect() { if (!node.isLocal()) rmtNodes.add(node); - if (!CU.clientNode(node)) { + if (!node.isClient()) { srvNodes.add(node); if (minSrvVer == null) @@ -2292,12 +2442,6 @@ else if (node.version().compareTo(minVer) < 0) assert !rmtNodes.contains(loc) : "Remote nodes collection shouldn't contain local node" + " [rmtNodes=" + rmtNodes + ", loc=" + loc + ']'; - Map> allCacheNodes = U.newHashMap(allNodes.size()); - Map> cacheGrpAffNodes = U.newHashMap(allNodes.size()); - Set rmtNodesWithCaches = new TreeSet<>(NodeOrderComparator.getInstance()); - - fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches); - BaselineTopology blt = state.baselineTopology(); if (blt != null) { @@ -2340,6 +2484,13 @@ else if (node.version().compareTo(minVer) < 0) baselineNodes = null; } + Map> allCacheNodes = U.newHashMap(allNodes.size()); + Map> cacheGrpAffNodes = U.newHashMap(allNodes.size()); + Set rmtNodesWithCaches = new TreeSet<>(NodeOrderComparator.getInstance()); + + fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches, + nodeIdToConsIdx == null ? null : nodeIdToConsIdx.keySet()); + return new DiscoCache( topVer, state, @@ -2365,7 +2516,7 @@ else if (node.version().compareTo(minVer) < 0) * * @param cacheMap Map to add to. * @param cacheName Cache name. 
- * @param rich Node to add + * @param node Node to add */ private void addToMap(Map> cacheMap, String cacheName, ClusterNode rich) { List cacheNodes = cacheMap.get(CU.cacheId(cacheName)); @@ -2379,6 +2530,76 @@ private void addToMap(Map> cacheMap, String cacheName cacheNodes.add(rich); } + /** + * @param cfg Configuration. + * @throws IgniteCheckedException If configuration is not valid. + */ + public static void initCommunicationErrorResolveConfiguration(IgniteConfiguration cfg) throws IgniteCheckedException { + CommunicationFailureResolver rslvr = cfg.getCommunicationFailureResolver(); + CommunicationSpi commSpi = cfg.getCommunicationSpi(); + DiscoverySpi discoverySpi = cfg.getDiscoverySpi(); + + if (rslvr != null) { + if (!supportsCommunicationErrorResolve(commSpi)) + throw new IgniteCheckedException("CommunicationFailureResolver is configured, but CommunicationSpi does not support communication" + + "problem resolve: " + commSpi.getClass().getName()); + + if (!supportsCommunicationErrorResolve(discoverySpi)) + throw new IgniteCheckedException("CommunicationFailureResolver is configured, but DiscoverySpi does not support communication" + + "problem resolve: " + discoverySpi.getClass().getName()); + } + else { + if (supportsCommunicationErrorResolve(commSpi) && supportsCommunicationErrorResolve(discoverySpi)) + cfg.setCommunicationFailureResolver(new DefaultCommunicationFailureResolver()); + } + } + + /** + * @param spi Discovery SPI. + * @return {@code True} if SPI supports communication error resolve. + */ + private static boolean supportsCommunicationErrorResolve(DiscoverySpi spi) { + return spi instanceof IgniteDiscoverySpi && ((IgniteDiscoverySpi)spi).supportsCommunicationFailureResolve(); + } + + /** + * @param spi Discovery SPI. + * @return {@code True} if SPI supports communication error resolve. 
+ */ + private static boolean supportsCommunicationErrorResolve(CommunicationSpi spi) { + return spi instanceof TcpCommunicationSpi; + } + + /** + * @return {@code True} if communication error resolve is supported. + */ + public boolean communicationErrorResolveSupported() { + return ctx.config().getCommunicationFailureResolver() != null; + } + + /** + * @return {@code True} if configured {@link DiscoverySpi} supports mutable custom messages. + */ + public boolean mutableCustomMessages() { + DiscoverySpiMutableCustomMessageSupport ann = U.getAnnotation(ctx.config().getDiscoverySpi().getClass(), + DiscoverySpiMutableCustomMessageSupport.class); + + return ann != null && ann.value(); + } + + /** + * @param node Problem node. + * @param err Error. + */ + public void resolveCommunicationError(ClusterNode node, Exception err) { + DiscoverySpi spi = getSpi(); + + if (!supportsCommunicationErrorResolve(spi) || !supportsCommunicationErrorResolve(ctx.config().getCommunicationSpi())) + throw new UnsupportedOperationException(); + + ((IgniteDiscoverySpi)spi).resolveCommunicationFailure(node, err); + } + /** Worker for network segment checks. */ private class SegmentCheckWorker extends GridWorker { /** */ @@ -2404,15 +2625,15 @@ public void scheduleSegmentCheck() { /** {@inheritDoc} */ @SuppressWarnings("StatementWithEmptyBody") @Override protected void body() throws InterruptedException { - long lastChk = 0; + long lastChkNanos = 0; while (!isCancelled()) { Object req = queue.poll(2000, MILLISECONDS); - long now = U.currentTimeMillis(); + long nowNanos = System.nanoTime(); // Check frequency if segment check has not been requested. 
- if (req == null && (segChkFreq == 0 || lastChk + segChkFreq >= now)) { + if (req == null && (segChkFreq == 0 || U.nanosToMillis(nowNanos - lastChkNanos) <= segChkFreq)) { if (log.isDebugEnabled()) log.debug("Skipping segment check as it has not been requested and it is not time to check."); @@ -2421,7 +2642,7 @@ public void scheduleSegmentCheck() { // We should always check segment if it has been explicitly // requested (on any node failure or leave). - assert req != null || lastChk + segChkFreq < now; + assert req != null || U.nanosToMillis(nowNanos - lastChkNanos) > segChkFreq; // Drain queue. while (queue.poll() != null) { @@ -2431,7 +2652,7 @@ public void scheduleSegmentCheck() { if (lastSegChkRes.get()) { boolean segValid = ctx.segmentation().isValidSegment(); - lastChk = now; + lastChkNanos = nowNanos; if (!segValid) { ClusterNode node = getSpi().getLocalNode(); @@ -2464,6 +2685,95 @@ public void scheduleSegmentCheck() { } } + /** + * + */ + private class DiscoveryMessageNotifierWorker extends GridWorker implements IgniteDiscoveryThread { + /** Queue. */ + private final BlockingQueue> queue = new LinkedBlockingQueue<>(); + + /** + * Default constructor. + */ + protected DiscoveryMessageNotifierWorker() { + super(ctx.igniteInstanceName(), "disco-notifier-worker", GridDiscoveryManager.this.log, ctx.workersRegistry()); + } + + /** + * + */ + private void body0() throws InterruptedException { + T2 notification; + + blockingSectionBegin(); + + try { + notification = queue.take(); + } + finally { + blockingSectionEnd(); + } + + try { + notification.get2().run(); + } + finally { + notification.get1().onDone(); + } + } + + /** + * @param cmd Command. + */ + public synchronized void submit(GridFutureAdapter notificationFut, Runnable cmd) { + if (isCancelled()) { + notificationFut.onDone(); + + return; + } + + queue.add(new T2<>(notificationFut, cmd)); + } + + /** + * Cancel thread execution and completes all notification futures. 
+ */ + @Override public synchronized void cancel() { + super.cancel(); + + while (!queue.isEmpty()) { + T2 notification = queue.poll(); + + if (notification != null) + notification.get1().onDone(); + } + } + + /** {@inheritDoc} */ + @Override protected void body() throws InterruptedException, IgniteInterruptedCheckedException { + while (!isCancelled()) { + try { + body0(); + } + catch (InterruptedException e) { + if (!isCancelled) + ctx.failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, e)); + + throw e; + } + catch (Throwable t) { + U.error(log, "Exception in discovery notyfier worker thread.", t); + + FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION; + + ctx.failure().process(new FailureContext(type, t)); + + throw t; + } + } + } + } + /** Worker for discovery events. */ private class DiscoveryWorker extends GridWorker { /** */ @@ -2486,7 +2796,7 @@ private class DiscoveryWorker extends GridWorker { * */ private DiscoveryWorker() { - super(ctx.igniteInstanceName(), "disco-event-worker", GridDiscoveryManager.this.log); + super(ctx.igniteInstanceName(), "disco-event-worker", GridDiscoveryManager.this.log, ctx.workersRegistry()); } /** @@ -2511,28 +2821,28 @@ private void recordEvent(int type, long topVer, ClusterNode node, DiscoCache dis evt.topologySnapshot(topVer, U.arrayList(topSnapshot, FILTER_NOT_DAEMON)); if (type == EVT_NODE_METRICS_UPDATED) - evt.message("Metrics were updated: " + node); + evt.messageTemplate("Metrics were updated: "); else if (type == EVT_NODE_JOINED) - evt.message("Node joined: " + node); + evt.messageTemplate("Node joined: "); else if (type == EVT_NODE_LEFT) - evt.message("Node left: " + node); + evt.messageTemplate("Node left: "); else if (type == EVT_NODE_FAILED) - evt.message("Node failed: " + node); + evt.messageTemplate("Node failed: "); else if (type == EVT_NODE_SEGMENTED) - evt.message("Node segmented: " + node); + evt.messageTemplate("Node segmented: "); else if (type == 
EVT_CLIENT_NODE_DISCONNECTED) - evt.message("Client node disconnected: " + node); + evt.messageTemplate("Client node disconnected: "); else if (type == EVT_CLIENT_NODE_RECONNECTED) - evt.message("Client node reconnected: " + node); + evt.messageTemplate("Client node reconnected: "); else - assert false; + assert false : "Unexpected discovery message type: " + type;; ctx.event().record(evt, discoCache); } @@ -2564,15 +2874,23 @@ void addEvent( while (!isCancelled()) { try { body0(); + + onIdle(); } catch (InterruptedException e) { + if (!isCancelled) + ctx.failure().process(new FailureContext(SYSTEM_WORKER_TERMINATION, e)); + throw e; } catch (Throwable t) { - U.error(log, "Unexpected exception in discovery worker thread (ignored).", t); + U.error(log, "Exception in discovery event worker thread.", t); + + FailureType type = t instanceof OutOfMemoryError ? CRITICAL_ERROR : SYSTEM_WORKER_TERMINATION; - if (t instanceof Error) - throw (Error)t; + ctx.failure().process(new FailureContext(type, t)); + + throw t; } } } @@ -2581,12 +2899,24 @@ void addEvent( @SuppressWarnings("DuplicateCondition") private void body0() throws InterruptedException { GridTuple6, - DiscoveryCustomMessage> evt = evts.take(); + DiscoveryCustomMessage> evt; + + blockingSectionBegin(); + + try { + evt = evts.take(); + } + finally { + blockingSectionEnd(); + } int type = evt.get1(); AffinityTopologyVersion topVer = evt.get2(); + if (type == EVT_NODE_METRICS_UPDATED && (discoCache == null || topVer.compareTo(discoCache.version()) < 0)) + return; + ClusterNode node = evt.get3(); boolean isDaemon = node.isDaemon(); @@ -2613,7 +2943,7 @@ private void body0() throws InterruptedException { if (log.isInfoEnabled()) log.info("Added new node to topology: " + node); - ackTopology(topVer.topologyVersion(), true); + ackTopology(topVer.topologyVersion(), type, node, true); } else if (log.isDebugEnabled()) log.debug("Added new node to topology: " + node); @@ -2634,7 +2964,7 @@ else if (log.isDebugEnabled()) if 
(log.isInfoEnabled()) log.info("Node left topology: " + node); - ackTopology(topVer.topologyVersion(), true); + ackTopology(topVer.topologyVersion(), type, node, true); } else if (log.isDebugEnabled()) log.debug("Node left topology: " + node); @@ -2656,7 +2986,7 @@ else if (log.isDebugEnabled()) log.info("Client node reconnected to topology: " + node); if (!isLocDaemon) - ackTopology(topVer.topologyVersion(), true); + ackTopology(topVer.topologyVersion(), type, node, true); break; } @@ -2670,7 +3000,7 @@ else if (log.isDebugEnabled()) if (!isLocDaemon) { U.warn(log, "Node FAILED: " + node); - ackTopology(topVer.topologyVersion(), true); + ackTopology(topVer.topologyVersion(), type, node, true); } else if (log.isDebugEnabled()) log.debug("Node FAILED: " + node); @@ -2951,23 +3281,29 @@ private static class CacheGroupAffinity { /** Cache mode. */ private final CacheMode cacheMode; + /** Persistent cache group or not. */ + private final boolean persistentCacheGrp; + /** * @param name Name. * @param cacheFilter Node filter. * @param cacheMode Cache mode. + * @param persistentCacheGrp Persistence is configured for cache or not. */ CacheGroupAffinity( - String name, - IgnitePredicate cacheFilter, - CacheMode cacheMode) { + String name, + IgnitePredicate cacheFilter, + CacheMode cacheMode, + boolean persistentCacheGrp) { this.name = name; this.cacheFilter = cacheFilter; this.cacheMode = cacheMode; + this.persistentCacheGrp = persistentCacheGrp; } /** {@inheritDoc} */ @Override public String toString() { - return "CacheGroupAffinity [name=" + name + ']'; + return S.toString(CacheGroupAffinity.class, this); } } @@ -3092,14 +3428,19 @@ private Boolean cacheClientNode(ClusterNode node) { /** * Fills affinity node caches. - * * @param allNodes All nodes. * @param allCacheNodes All cache nodes. * @param cacheGrpAffNodes Cache group aff nodes. * @param rmtNodesWithCaches Rmt nodes with caches. 
- */ - private void fillAffinityNodeCaches(List allNodes, Map> allCacheNodes, - Map> cacheGrpAffNodes, Set rmtNodesWithCaches) { + * @param bltNodes Baseline node ids. + */ + private void fillAffinityNodeCaches( + List allNodes, + Map> allCacheNodes, + Map> cacheGrpAffNodes, + Set rmtNodesWithCaches, + Set bltNodes + ) { for (ClusterNode node : allNodes) { assert node.order() != 0 : "Invalid node order [locNode=" + localNode() + ", node=" + node + ']'; assert !node.isDaemon(); @@ -3109,6 +3450,9 @@ private void fillAffinityNodeCaches(List allNodes, Map nodes = cacheGrpAffNodes.get(grpId); if (nodes == null) @@ -3148,7 +3492,10 @@ public DiscoCache createDiscoCacheOnCacheChange( Map> cacheGrpAffNodes = U.newHashMap(allNodes.size()); Set rmtNodesWithCaches = new TreeSet<>(NodeOrderComparator.getInstance()); - fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches); + Map nodeIdToConsIdx = discoCache.nodeIdToConsIdx; + + fillAffinityNodeCaches(allNodes, allCacheNodes, cacheGrpAffNodes, rmtNodesWithCaches, + nodeIdToConsIdx == null ? null : nodeIdToConsIdx.keySet()); return new DiscoCache( topVer, @@ -3164,7 +3511,7 @@ public DiscoCache createDiscoCacheOnCacheChange( cacheGrpAffNodes, discoCache.nodeMap, discoCache.alives, - discoCache.nodeIdToConsIdx, + nodeIdToConsIdx, discoCache.consIdxToNodeId, discoCache.minimumNodeVersion(), discoCache.minimumServerNodeVersion()); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java new file mode 100644 index 0000000000000..a143122c88e9c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteClusterNode.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import java.io.Serializable; +import java.util.Map; +import org.apache.ignite.cache.CacheMetrics; +import org.apache.ignite.cluster.ClusterMetrics; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.IgniteConfiguration; + +/** + * + */ +public interface IgniteClusterNode extends ClusterNode { + /** + * Sets consistent globally unique node ID which survives node restarts. + * + * @param consistentId Consistent globally unique node ID. + */ + public void setConsistentId(Serializable consistentId); + + /** + * Sets node metrics. + * + * @param metrics Node metrics. + */ + public void setMetrics(ClusterMetrics metrics); + + /** + * Gets collections of cache metrics for this node. Note that node cache metrics are constantly updated + * and provide up to date information about caches. + *

+ * Cache metrics are updated with some delay which is directly related to metrics update + * frequency. For example, by default the update will happen every {@code 2} seconds. + * + * @return Runtime metrics snapshots for this node. + */ + public Map cacheMetrics(); + + /** + * Sets node cache metrics. + * + * @param cacheMetrics Cache metrics. + */ + public void setCacheMetrics(Map cacheMetrics); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpi.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpi.java new file mode 100644 index 0000000000000..f2f825040ce4c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpi.java @@ -0,0 +1,74 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.IgniteFeatures; +import org.apache.ignite.spi.discovery.DiscoverySpi; + +/** + * + */ +public interface IgniteDiscoverySpi extends DiscoverySpi { + /** + * @param nodeId Node ID. 
+ * @return {@code True} if node joining or already joined topology. + */ + public boolean knownNode(UUID nodeId); + + /** + * + * @return {@code True} if SPI supports client reconnect. + */ + public boolean clientReconnectSupported(); + + /** + * + */ + public void clientReconnect(); + + /** + * @param feature Feature to check. + * @return {@code true} if all nodes support the given feature. + */ + public boolean allNodesSupport(IgniteFeatures feature); + + /** + * For TESTING only. + */ + public void simulateNodeFailure(); + + /** + * For TESTING only. + * + * @param lsnr Listener. + */ + public void setInternalListener(IgniteDiscoverySpiInternalListener lsnr); + + /** + * @return {@code True} if supports communication error resolve. + */ + public boolean supportsCommunicationFailureResolve(); + + /** + * @param node Problem node. + * @param err Connection error. + */ + public void resolveCommunicationFailure(ClusterNode node, Exception err); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpiInternalListener.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpiInternalListener.java new file mode 100644 index 0000000000000..80164238dd639 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IgniteDiscoverySpiInternalListener.java @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.spi.discovery.DiscoverySpi; +import org.apache.ignite.spi.discovery.DiscoverySpiCustomMessage; + +/** + * For TESTING only. + */ +public interface IgniteDiscoverySpiInternalListener { + /** + * @param locNode Local node. + * @param log Log. + */ + public void beforeJoin(ClusterNode locNode, IgniteLogger log); + + /** + * @param locNode Local node. + * @param log Logger. + */ + default public void beforeReconnect(ClusterNode locNode, IgniteLogger log) { + // No-op. + } + + /** + * @param spi SPI instance. + * @param log Logger. + * @param msg Custom message. + * @return {@code False} to cancel event send. + */ + public boolean beforeSendCustomEvent(DiscoverySpi spi, IgniteLogger log, DiscoverySpiCustomMessage msg); +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java new file mode 100644 index 0000000000000..5a440cefa1cca --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/discovery/IncompleteDeserializationException.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.managers.discovery; + +import org.jetbrains.annotations.NotNull; + +/** + * Exception which can be used to access a message which failed to be deserialized completely using Java serialization. + * Throwed from deserialization methods it can be caught by a caller. + *

+ * Should be {@link RuntimeException} because of limitations of Java serialization mechanisms. + *

+ * Catching {@link ClassNotFoundException} inside deserialization methods cannot do the same trick because + * Java deserialization remembers such exception internally and will rethrow it anyway upon returing to a user. + */ +public class IncompleteDeserializationException extends RuntimeException { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private final DiscoveryCustomMessage m; + + /** + * @param m Message. + */ + public IncompleteDeserializationException(@NotNull DiscoveryCustomMessage m) { + super(null, null, false, false); + + this.m = m; + } + + /** + * @return Message. + */ + @NotNull public DiscoveryCustomMessage message() { + return m; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java index 68084500757ca..92963403f97f6 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageManager.java @@ -72,9 +72,12 @@ import static org.apache.ignite.events.EventType.EVTS_ALL; import static org.apache.ignite.events.EventType.EVTS_DISCOVERY_ALL; +import static org.apache.ignite.events.EventType.EVT_JOB_MAPPED; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.events.EventType.EVT_NODE_METRICS_UPDATED; +import static org.apache.ignite.events.EventType.EVT_TASK_FAILED; +import static org.apache.ignite.events.EventType.EVT_TASK_FINISHED; import static org.apache.ignite.internal.GridTopic.TOPIC_EVENT; import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.PUBLIC_POOL; @@ -316,6 
+319,9 @@ public void record(DiscoveryEvent evt, DiscoCache discoCache) { private void record0(Event evt, Object... params) { assert evt != null; + if (ctx.recoveryMode()) + return; + if (!enterBusy()) return; @@ -372,7 +378,7 @@ public int[] enabledEvents() { public synchronized void enableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_ENABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_ENABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -415,7 +421,7 @@ public synchronized void enableEvents(int[] types) { public synchronized void disableEvents(int[] types) { assert types != null; - ctx.security().authorize(null, SecurityPermission.EVENTS_DISABLE, null); + ctx.security().authorize(SecurityPermission.EVENTS_DISABLE); boolean[] userRecordableEvts0 = userRecordableEvts; boolean[] recordableEvts0 = recordableEvts; @@ -504,7 +510,16 @@ private boolean isHiddenEvent(int type) { * @return {@code true} if this is an internal event. 
*/ private boolean isInternalEvent(int type) { - return type == EVT_DISCOVERY_CUSTOM_EVT || F.contains(EVTS_DISCOVERY_ALL, type); + switch (type) { + case EVT_DISCOVERY_CUSTOM_EVT: + case EVT_TASK_FINISHED: + case EVT_TASK_FAILED: + case EVT_JOB_MAPPED: + return true; + + default: + return F.contains(EVTS_DISCOVERY_ALL, type); + } } /** @@ -559,13 +574,8 @@ public boolean hasListener(int type) { public boolean isAllUserRecordable(int[] types) { assert types != null; - boolean[] userRecordableEvts0 = userRecordableEvts; - for (int type : types) { - if (type < 0 || type >= len) - throw new IllegalArgumentException("Invalid event type: " + type); - - if (!userRecordableEvts0[type]) + if (!isUserRecordable(type)) return false; } @@ -1063,21 +1073,18 @@ private List query(IgnitePredicate p, Collection uidsCp = null; synchronized (qryMux) { try { - while (!uids.isEmpty() && err.get() == null && delta > 0) { - qryMux.wait(delta); + while (!uids.isEmpty() && err.get() == null && passedMillis < timeout) { + qryMux.wait(timeout - passedMillis); - delta = endTime - U.currentTimeMillis(); + passedMillis = U.millisSinceNanos(startNanos); } } catch (InterruptedException e) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageMessage.java index 515500b91d0f1..fd5326cf0577c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/managers/eventstorage/GridEventStorageMessage.java @@ -445,4 +445,4 @@ void exceptionBytes(byte[] exBytes) { @Override public String toString() { return S.toString(GridEventStorageMessage.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java 
b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java index ccd99468a51db..0369b66513a10 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedClassDescriptor.java @@ -43,16 +43,18 @@ import java.util.List; import java.util.Map; import java.util.Properties; +import java.util.TreeMap; import java.util.UUID; import java.util.concurrent.ConcurrentMap; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.SerializableTransient; +import org.apache.ignite.internal.util.TransientSerializable; +import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.marshaller.MarshallerContext; import org.apache.ignite.marshaller.MarshallerExclusions; -import org.apache.ignite.marshaller.MarshallerUtils; import static java.lang.reflect.Modifier.isFinal; import static java.lang.reflect.Modifier.isPrivate; @@ -92,6 +94,8 @@ import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.STR; import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.UUID; import static org.apache.ignite.internal.marshaller.optimized.OptimizedMarshallerUtils.computeSerialVersionUid; +import static org.apache.ignite.marshaller.MarshallerUtils.jobReceiverVersion; +import static org.apache.ignite.marshaller.MarshallerUtils.jobSenderVersion; /** * Class descriptor. @@ -172,6 +176,9 @@ class OptimizedClassDescriptor { /** Method returns serializable transient fields. */ private Method serTransMtd; + /** Method returns transient serializable fields. */ + private Method transSerMtd; + /** * Creates descriptor for class. 
* @@ -448,16 +455,16 @@ else if (Proxy.class.isAssignableFrom(cls)) { readObjMtds.add(mtd); final SerializableTransient serTransAn = c.getAnnotation(SerializableTransient.class); + final TransientSerializable transSerAn = c.getAnnotation(TransientSerializable.class); // Custom serialization policy for transient fields. if (serTransAn != null) { try { - serTransMtd = c.getDeclaredMethod(serTransAn.methodName(), cls, IgniteProductVersion.class); + serTransMtd = c.getDeclaredMethod(serTransAn.methodName(), IgniteProductVersion.class); int mod = serTransMtd.getModifiers(); - if (isStatic(mod) && isPrivate(mod) - && serTransMtd.getReturnType() == String[].class) + if (isStatic(mod) && isPrivate(mod) && serTransMtd.getReturnType() == String[].class) serTransMtd.setAccessible(true); else // Set method back to null if it has incorrect signature. @@ -468,6 +475,24 @@ else if (Proxy.class.isAssignableFrom(cls)) { } } + // Custom serialization policy for non-transient fields. + if (transSerAn != null) { + try { + transSerMtd = c.getDeclaredMethod(transSerAn.methodName(), IgniteProductVersion.class); + + int mod = transSerMtd.getModifiers(); + + if (isStatic(mod) && isPrivate(mod) && transSerMtd.getReturnType() == String[].class) + transSerMtd.setAccessible(true); + else + // Set method back to null if it has incorrect signature. + transSerMtd = null; + } + catch (NoSuchMethodException ignored) { + transSerMtd = null; + } + } + Field[] clsFields0 = c.getDeclaredFields(); Map fieldNames = new HashMap<>(); @@ -824,7 +849,7 @@ void write(OptimizedObjectOutputStream out, Object obj) throws IOException { writeTypeData(out); out.writeShort(checksum); - out.writeSerializable(obj, writeObjMtds, serializableFields(obj.getClass(), obj, null)); + out.writeSerializable(obj, writeObjMtds, fields(obj.getClass(), jobReceiverVersion())); break; @@ -840,45 +865,52 @@ void write(OptimizedObjectOutputStream out, Object obj) throws IOException { * ignored. * * @param cls Class. 
- * @param obj Object. * @param ver Job sender version. * @return Serializable fields. */ @SuppressWarnings("ForLoopReplaceableByForEach") - private Fields serializableFields(Class cls, Object obj, IgniteProductVersion ver) { - if (serTransMtd == null) + private Fields fields(Class cls, IgniteProductVersion ver) { + if (ver == null // No context available. + || serTransMtd == null && transSerMtd == null) return fields; try { - final String[] transFields = (String[])serTransMtd.invoke(cls, obj, ver); + final String[] transFields = serTransMtd == null ? null : (String[])serTransMtd.invoke(null, ver); + final String[] serFields = transSerMtd == null ? null : (String[])transSerMtd.invoke(null, ver); - if (transFields == null || transFields.length == 0) + if (F.isEmpty(transFields) && F.isEmpty(serFields)) return fields; - List clsFields = new ArrayList<>(); + Map clsFields = new TreeMap<>(); - clsFields.addAll(fields.fields.get(0).fields); + for (FieldInfo field : fields.fields.get(0).fields) { + clsFields.put(field.fieldName, field); + } - for (int i = 0; i < transFields.length; i++) { - final String fieldName = transFields[i]; + // Add serializable transient fields + if (!F.isEmpty(transFields)) { + for (int i = 0; i < transFields.length; i++) { + final String fieldName = transFields[i]; - final Field f = cls.getDeclaredField(fieldName); + final Field f = cls.getDeclaredField(fieldName); - FieldInfo fieldInfo = new FieldInfo(f, f.getName(), - GridUnsafe.objectFieldOffset(f), fieldType(f.getType())); + FieldInfo fieldInfo = new FieldInfo(f, f.getName(), + GridUnsafe.objectFieldOffset(f), fieldType(f.getType())); - clsFields.add(fieldInfo); + clsFields.put(fieldName, fieldInfo); + } } - Collections.sort(clsFields, new Comparator() { - @Override public int compare(FieldInfo t1, FieldInfo t2) { - return t1.name().compareTo(t2.name()); + // Exclude non-transient fields which shouldn't be serialized. 
+ if (!F.isEmpty(serFields)) { + for (int i = 0; i < serFields.length; i++) { + clsFields.remove(serFields[i]); } - }); + } - List fields = new ArrayList<>(); + List fields = new ArrayList<>(1); - fields.add(new ClassFields(clsFields)); + fields.add(new ClassFields(new ArrayList<>(clsFields.values()))); return new Fields(fields); } @@ -919,12 +951,7 @@ Object read(OptimizedObjectInputStream in) throws ClassNotFoundException, IOExce case SERIALIZABLE: verifyChecksum(in.readShort()); - // If no serialize method, then unmarshal as usual. - if (serTransMtd != null) - return in.readSerializable(cls, readObjMtds, readResolveMtd, - serializableFields(cls, null, MarshallerUtils.jobSenderVersion())); - else - return in.readSerializable(cls, readObjMtds, readResolveMtd, fields); + return in.readSerializable(cls, readObjMtds, readResolveMtd, fields(cls, jobSenderVersion())); default: assert false : "Unexpected type: " + type; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedMarshallerUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedMarshallerUtils.java index aa4bfd6a2cd66..3ede240938146 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedMarshallerUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedMarshallerUtils.java @@ -201,9 +201,9 @@ static OptimizedClassDescriptor classDescriptor( boolean registered; try { - registered = ctx.registerClassName(JAVA_ID, typeId, cls.getName()); + registered = ctx.registerClassName(JAVA_ID, typeId, cls.getName(), false); } - catch (IgniteCheckedException e) { + catch (Exception e) { throw new IOException("Failed to register class: " + cls.getName(), e); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedObjectOutputStream.java 
b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedObjectOutputStream.java index 66da2da7eb908..41bd80dd9a538 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedObjectOutputStream.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/marshaller/optimized/OptimizedObjectOutputStream.java @@ -42,6 +42,7 @@ import org.apache.ignite.internal.util.GridHandleTable; import org.apache.ignite.internal.util.io.GridDataOutput; import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.marshaller.MarshallerContext; @@ -62,7 +63,7 @@ /** * Optimized object output stream. */ -class OptimizedObjectOutputStream extends ObjectOutputStream { +public class OptimizedObjectOutputStream extends ObjectOutputStream { /** */ private final GridHandleTable handles = new GridHandleTable(10, 3.00f); @@ -180,7 +181,8 @@ private void writeObject0(Object obj) throws IOException { if (obj == null) writeByte(NULL); else { - if (obj instanceof Throwable && !(obj instanceof Externalizable)) { + if (obj instanceof Throwable && !(obj instanceof Externalizable) || U.isEnum(obj.getClass())) { + // Avoid problems with differing Enum objects or Enum implementation class deadlocks. writeByte(JDK); try { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/mem/DirectMemoryProvider.java b/modules/core/src/main/java/org/apache/ignite/internal/mem/DirectMemoryProvider.java index a90c6b80b9e33..03d386bdfc326 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/mem/DirectMemoryProvider.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/mem/DirectMemoryProvider.java @@ -27,9 +27,11 @@ public interface DirectMemoryProvider { public void initialize(long[] chunkSizes); /** - * Shuts down the provider. Will deallocate all previously allocated regions. 
+ * Shuts down the provider. + * + * @param deallocate {@code True} to deallocate memory, {@code false} to allow memory reuse. */ - public void shutdown(); + public void shutdown(boolean deallocate); /** * Attempts to allocate next memory region. Will return {@code null} if no more regions are available. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/mem/file/MappedFileMemoryProvider.java b/modules/core/src/main/java/org/apache/ignite/internal/mem/file/MappedFileMemoryProvider.java index 3800214b90f7f..67e86f5ff779c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/mem/file/MappedFileMemoryProvider.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/mem/file/MappedFileMemoryProvider.java @@ -30,7 +30,9 @@ import org.apache.ignite.internal.util.typedef.internal.U; /** - * + * Memory provider implementation based on memory mapped file. + *

+ * Doesn't support memory reuse semantics. */ public class MappedFileMemoryProvider implements DirectMemoryProvider { /** */ @@ -55,6 +57,9 @@ public class MappedFileMemoryProvider implements DirectMemoryProvider { /** */ private List mappedFiles; + /** Flag shows if current memory provider have been already initialized. */ + private boolean isInit; + /** * @param allocationPath Allocation path. */ @@ -65,6 +70,9 @@ public MappedFileMemoryProvider(IgniteLogger log, File allocationPath) { /** {@inheritDoc} */ @Override public void initialize(long[] sizes) { + if (isInit) + throw new IgniteException("Second initialization does not allowed for current provider"); + this.sizes = sizes; mappedFiles = new ArrayList<>(sizes.length); @@ -90,18 +98,24 @@ public MappedFileMemoryProvider(IgniteLogger log, File allocationPath) { "opened by another process and current user has enough rights): " + file); } } + + isInit = true; } /** {@inheritDoc} */ - @Override public void shutdown() { - for (MappedFile file : mappedFiles) { - try { - file.close(); - } - catch (IOException e) { - log.error("Failed to close memory-mapped file upon stop (will ignore) [file=" + - file.file() + ", err=" + e.getMessage() + ']'); + @Override public void shutdown(boolean deallocate) { + if (mappedFiles != null) { + for (MappedFile file : mappedFiles) { + try { + file.close(); + } + catch (IOException e) { + log.error("Failed to close memory-mapped file upon stop (will ignore) [file=" + + file.file() + ", err=" + e.getMessage() + ']'); + } } + + mappedFiles = null; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/mem/unsafe/UnsafeMemoryProvider.java b/modules/core/src/main/java/org/apache/ignite/internal/mem/unsafe/UnsafeMemoryProvider.java index 276e10e1783c9..8cb8119001a23 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/mem/unsafe/UnsafeMemoryProvider.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/mem/unsafe/UnsafeMemoryProvider.java @@ -29,7 
+29,9 @@ import org.apache.ignite.internal.util.typedef.internal.U; /** - * + * Memory provider implementation based on unsafe memory access. + *

+ * Supports memory reuse semantics. */ public class UnsafeMemoryProvider implements DirectMemoryProvider { /** */ @@ -41,6 +43,12 @@ public class UnsafeMemoryProvider implements DirectMemoryProvider { /** */ private IgniteLogger log; + /** Flag shows if current memory provider have been already initialized. */ + private boolean isInit; + + /** */ + private int used = 0; + /** * @param log Ignite logger to use. */ @@ -50,30 +58,43 @@ public UnsafeMemoryProvider(IgniteLogger log) { /** {@inheritDoc} */ @Override public void initialize(long[] sizes) { + if (isInit) + return; + this.sizes = sizes; regions = new ArrayList<>(); + + isInit = true; } /** {@inheritDoc} */ - @Override public void shutdown() { + @Override public void shutdown(boolean deallocate) { if (regions != null) { for (Iterator it = regions.iterator(); it.hasNext(); ) { DirectMemoryRegion chunk = it.next(); - GridUnsafe.freeMemory(chunk.address()); + if (deallocate) { + GridUnsafe.freeMemory(chunk.address()); - // Safety. - it.remove(); + // Safety. + it.remove(); + } } + + if (!deallocate) + used = 0; } } /** {@inheritDoc} */ @Override public DirectMemoryRegion nextRegion() { - if (regions.size() == sizes.length) + if (used == sizes.length) return null; + if (used < regions.size()) + return regions.get(used++); + long chunkSize = sizes[regions.size()]; long ptr; @@ -85,7 +106,7 @@ public UnsafeMemoryProvider(IgniteLogger log) { String msg = "Failed to allocate next memory chunk: " + U.readableSize(chunkSize, true) + ". 
Check if chunkSize is too large and 32-bit JVM is used."; - if (regions.size() == 0) + if (regions.isEmpty()) throw new IgniteException(msg, e); U.error(log, msg); @@ -103,6 +124,8 @@ public UnsafeMemoryProvider(IgniteLogger log) { regions.add(region); + used++; + return region; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java index 9e249437f5bb2..17c552d9ddecf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/FullPageId.java @@ -40,10 +40,10 @@ *

*

Page ID rotation

* There are scenarios when we reference one page (B) from within another page (A) by page ID. It is also - * possible that this first page (B) is de-allocated and allocated again for a different purpose. In this - * case we should have a mechanism to determine that page (B) cannot be used after reading it's ID in page (A). + * possible that this first page (B) is concurrently reused for a different purpose. In this + * case we should have a mechanism to determine that the reference from page (A) to page (B) is no longer valid. * This is ensured by page ID rotation - together with page's (B) ID we should write some value that is incremented - * each time a page is de-allocated (page ID rotation). This ID should be verified after page read and a page + * each time a page is reused (page ID rotation). This ID should be verified after page read and a page * should be discarded if full ID is different. *

* Effective page ID is page ID with zeroed bits used for page ID rotation. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java index c6aeabe087975..d91d31da32957 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageIdAllocator.java @@ -35,6 +35,12 @@ public interface PageIdAllocator { /** Special partition reserved for index space. */ public static final int INDEX_PARTITION = 0xFFFF; + /** Old special partition reserved for metastore space. */ + public static final int OLD_METASTORE_PARTITION = 0x0; + + /** Special partition reserved for metastore space. */ + public static final int METASTORE_PARTITION = 0x1; + /** * Allocates a page from the space for the given partition ID and the given flags. * diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageMemory.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageMemory.java index 6f2e2c9ec36ed..f7391d2d79a9c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageMemory.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageMemory.java @@ -18,11 +18,25 @@ package org.apache.ignite.internal.pagemem; import java.nio.ByteBuffer; +import org.apache.ignite.IgniteException; import org.apache.ignite.lifecycle.LifecycleAware; /** */ -public interface PageMemory extends LifecycleAware, PageIdAllocator, PageSupport { +public interface PageMemory extends PageIdAllocator, PageSupport { + /** + * Start page memory. + */ + public void start() throws IgniteException; + + /** + * Stop page memory. 
+ * + * @param deallocate {@code True} to deallocate memory, {@code false} to allow memory reuse on subsequent {@link #start()} + * @throws IgniteException + */ + public void stop(boolean deallocate) throws IgniteException; + /** * @return Page size in bytes. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageSupport.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageSupport.java index ed2311f3ebd26..877234d34cf88 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageSupport.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/PageSupport.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.pagemem; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.stat.IoStatisticsHolder; /** * Supports operations on pages. @@ -35,6 +36,19 @@ public interface PageSupport { */ public long acquirePage(int grpId, long pageId) throws IgniteCheckedException; + /** + * Gets the page absolute pointer associated with the given page ID. Each page obtained with this method must be + * released by calling {@link #releasePage(int, long, long)}. This method will allocate page with given ID if it + * doesn't exist. + * + * @param grpId Cache group ID. + * @param pageId Page ID. + * @param statHolder Statistics holder to track IO operations. + * @return Page pointer. + * @throws IgniteCheckedException If failed. + */ + public long acquirePage(int grpId, long pageId, IoStatisticsHolder statHolder) throws IgniteCheckedException; + /** * * @param grpId Cache group ID. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java index 7424af664a313..647ff07bf3a9a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/impl/PageMemoryNoStoreImpl.java @@ -28,6 +28,8 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.configuration.DataRegionConfiguration; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.mem.DirectMemoryProvider; import org.apache.ignite.internal.mem.DirectMemoryRegion; import org.apache.ignite.internal.mem.IgniteOutOfMemoryException; @@ -36,6 +38,8 @@ import org.apache.ignite.internal.processors.cache.GridCacheSharedContext; import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; +import org.apache.ignite.internal.stat.IoStatisticsHolder; +import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; import org.apache.ignite.internal.util.GridUnsafe; import org.apache.ignite.internal.util.IgniteUtils; import org.apache.ignite.internal.util.OffheapReadWriteLock; @@ -158,6 +162,9 @@ public class PageMemoryNoStoreImpl implements PageMemory { /** */ private final boolean trackAcquiredPages; + /** Shared context. */ + private final GridCacheSharedContext ctx; + /** * @param log Logger. * @param directMemoryProvider Memory allocator to use. 
@@ -184,6 +191,7 @@ public PageMemoryNoStoreImpl( this.trackAcquiredPages = trackAcquiredPages; this.memMetrics = memMetrics; this.dataRegionCfg = dataRegionCfg; + this.ctx = sharedCtx; sysPageSize = pageSize + PAGE_OVERHEAD; @@ -225,18 +233,19 @@ public PageMemoryNoStoreImpl( if (lastIdx != SEG_CNT - 1) chunks = Arrays.copyOf(chunks, lastIdx + 1); - directMemoryProvider.initialize(chunks); + if (segments == null) + directMemoryProvider.initialize(chunks); addSegment(null); } /** {@inheritDoc} */ @SuppressWarnings("OverlyStrongTypeCast") - @Override public void stop() throws IgniteException { + @Override public void stop(boolean deallocate) throws IgniteException { if (log.isDebugEnabled()) log.debug("Stopping page memory."); - directMemoryProvider.shutdown(); + directMemoryProvider.shutdown(deallocate); if (directMemoryProvider instanceof Closeable) { try { @@ -255,8 +264,6 @@ public PageMemoryNoStoreImpl( /** {@inheritDoc} */ @Override public long allocatePage(int grpId, int partId, byte flags) { - memMetrics.incrementTotalAllocatedPages(); - long relPtr = borrowFreePage(); long absPtr = 0; @@ -277,19 +284,17 @@ public PageMemoryNoStoreImpl( relPtr = allocSeg.allocateFreePage(flags); if (relPtr != INVALID_REL_PTR) { - if (relPtr != INVALID_REL_PTR) { - absPtr = allocSeg.absolute(PageIdUtils.pageIndex(relPtr)); + absPtr = allocSeg.absolute(PageIdUtils.pageIndex(relPtr)); - break; - } + break; } else allocSeg = addSegment(seg0); } } - if (relPtr == INVALID_REL_PTR) - throw new IgniteOutOfMemoryException("Out of memory in data region [" + + if (relPtr == INVALID_REL_PTR) { + IgniteOutOfMemoryException oom = new IgniteOutOfMemoryException("Out of memory in data region [" + "name=" + dataRegionCfg.getName() + ", initSize=" + U.readableSize(dataRegionCfg.getInitialSize(), false) + ", maxSize=" + U.readableSize(dataRegionCfg.getMaxSize(), false) + @@ -299,6 +304,13 @@ public PageMemoryNoStoreImpl( " ^-- Enable eviction or expiration policies" ); + if (ctx != null) + 
ctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, oom)); + + throw oom; + } + + assert (relPtr & ~PageIdUtils.PAGE_IDX_MASK) == 0 : U.hexLong(relPtr & ~PageIdUtils.PAGE_IDX_MASK); // Assign page ID according to flags and partition ID. @@ -428,11 +440,20 @@ private long fromSegmentIndex(int segIdx, long pageIdx) { /** {@inheritDoc} */ @Override public long acquirePage(int cacheId, long pageId) { + return acquirePage(cacheId, pageId, IoStatisticsHolderNoOp.INSTANCE); + } + + /** {@inheritDoc} */ + @Override public long acquirePage(int cacheId, long pageId, IoStatisticsHolder statHolder) { int pageIdx = PageIdUtils.pageIndex(pageId); Segment seg = segment(pageIdx); - return seg.acquirePage(pageIdx); + long absPtr = seg.acquirePage(pageIdx); + + statHolder.trackLogicalRead(absPtr + PAGE_OVERHEAD); + + return absPtr; } /** {@inheritDoc} */ @@ -565,6 +586,8 @@ private void releaseFreePage(long pageId) { if (freePageListHead.compareAndSet(freePageRelPtrMasked, relPtr)) { allocatedPages.decrementAndGet(); + memMetrics.updateTotalAllocatedPages(-1L); + return; } } @@ -593,6 +616,8 @@ private long borrowFreePage() { allocatedPages.incrementAndGet(); + memMetrics.updateTotalAllocatedPages(1L); + return freePageRelPtr; } } @@ -793,6 +818,8 @@ private long allocateFreePage(int tag) throws GridOffHeapOutOfMemoryException { allocatedPages.incrementAndGet(); + memMetrics.updateTotalAllocatedPages(1L); + return pageIdx; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java index 1b46bf990c540..1cf488eeb18b4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/store/IgnitePageStoreManager.java @@ -19,7 +19,9 @@ import java.nio.ByteBuffer; import java.util.Map; +import 
java.util.function.Predicate; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.CacheGroupContext; import org.apache.ignite.internal.processors.cache.CacheGroupDescriptor; @@ -193,6 +195,15 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * @throws IgniteCheckedException If failed. */ public void storeCacheData(StoredCacheData cacheData, boolean overwrite) throws IgniteCheckedException; + + /** + * Remove cache configuration data file. + * + * @param cacheData Cache configuration. + * @throws IgniteCheckedException If failed. + */ + public void removeCacheData(StoredCacheData cacheData) throws IgniteCheckedException; + /** * @param grpId Cache group ID. * @return {@code True} if index store for given cache group existed before node started. @@ -211,4 +222,35 @@ public void initializeForCache(CacheGroupDescriptor grpDesc, StoredCacheData cac * @return number of pages. */ public long pagesAllocated(int grpId); + + /** + * Cleanup persistent space for cache. + * + * @param cacheConfiguration Cache configuration of cache which should be cleanup. + */ + public void cleanupPersistentSpace(CacheConfiguration cacheConfiguration) throws IgniteCheckedException; + + /** + * Cleanup persistent space for all caches except metastore. + */ + public void cleanupPersistentSpace() throws IgniteCheckedException; + + /** + * Cleanup cache store whether it matches the provided predicate and if matched + * store was previously initizlized. + * + * @param cacheGrpPred Predicate to match by id cache group stores to clean. + * @param cleanFiles {@code True} to delete all persisted files related to particular store. + */ + public void cleanupPageStoreIfMatch(Predicate cacheGrpPred, boolean cleanFiles); + + /** + * Creates and initializes cache work directory retrieved from {@code cacheCfg}. 
+ * + * @param cacheCfg Cache configuration. + * @return {@code True} if work directory already exists. + * + * @throws IgniteCheckedException If failed. + */ + public boolean checkAndInitCacheWorkDir(CacheConfiguration cacheCfg) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java index 8e8a1b3a43e9b..83c01d76d57cc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/IgniteWriteAheadLogManager.java @@ -21,6 +21,8 @@ import org.apache.ignite.IgniteException; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.processors.cache.GridCacheSharedManager; +import org.apache.ignite.internal.processors.cache.persistence.StorageException; +import org.apache.ignite.internal.processors.cache.persistence.wal.SegmentRouter; import org.apache.ignite.internal.processors.cluster.IgniteChangeGlobalStateSupport; /** @@ -45,6 +47,8 @@ public interface IgniteWriteAheadLogManager extends GridCacheSharedManager, Igni /** * Resumes logging after start. When WAL manager is started, it will skip logging any updates until this * method is called to avoid logging changes induced by the state restore procedure. + * + * @throws IgniteCheckedException If fails. */ public void resumeLogging(WALPointer lastWrittenPtr) throws IgniteCheckedException; @@ -68,8 +72,20 @@ public interface IgniteWriteAheadLogManager extends GridCacheSharedManager, Igni * @throws IgniteCheckedException If failed to write. * @throws StorageException If IO exception occurred during the write. If an exception is thrown from this * method, the WAL will be invalidated and the node will be stopped. + * @return Last WAL position which was flushed to WAL segment file. 
May be greater than or equal to a {@code ptr}. + * May be {@code null}, it means nothing has been flushed. */ - public void flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException; + public WALPointer flush(WALPointer ptr, boolean explicitFsync) throws IgniteCheckedException, StorageException; + + /** + * Reads WAL record by the specified pointer. + * + * @param ptr WAL pointer. + * @return WAL record. + * @throws IgniteCheckedException If failed to read. + * @throws StorageException If IO error occurred while reading WAL entries. + */ + public WALRecord read(WALPointer ptr) throws IgniteCheckedException, StorageException; /** * Invoke this method to iterate over the written log entries. @@ -85,9 +101,8 @@ public interface IgniteWriteAheadLogManager extends GridCacheSharedManager, Igni * Invoke this method to reserve WAL history since provided pointer and prevent it's deletion. * * @param start WAL pointer. - * @throws IgniteException If failed to reserve. */ - public boolean reserve(WALPointer start) throws IgniteCheckedException; + public boolean reserve(WALPointer start); /** * Invoke this method to release WAL history since provided pointer that was previously reserved. @@ -114,24 +129,44 @@ public interface IgniteWriteAheadLogManager extends GridCacheSharedManager, Igni * * @param ptr Pointer for which it is safe to compact the log. */ - public void allowCompressionUntil(WALPointer ptr); + public void notchLastCheckpointPtr(WALPointer ptr); /** * @return Total number of segments in the WAL archive. */ public int walArchiveSegments(); + /** + * @return Last archived segment index. + */ + public long lastArchivedSegment(); + /** * Checks if WAL segment is under lock or reserved + * * @param ptr Pointer to check. * @return True if given pointer is located in reserved segment. */ public boolean reserved(WALPointer ptr); + /** + * Checks if WAL segments is under lock or reserved. 
+ * + * @param low Pointer since which WAL is locked or reserved. If {@code null}, checks from the oldest segment. + * @param high Pointer for which WAL is locked or reserved. + * @return Number of reserved WAL segments. + */ + public int reserved(WALPointer low, WALPointer high); + /** * Checks WAL disabled for cache group. * * @param grpId Group id. */ public boolean disabled(int grpId); + + /** + * @return Info about of WAL paths. + */ + SegmentRouter getSegmentRouter(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/BaselineTopologyRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/BaselineTopologyRecord.java deleted file mode 100644 index 48b60b3924362..0000000000000 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/BaselineTopologyRecord.java +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.ignite.internal.pagemem.wal.record; - -import java.util.Map; -import org.apache.ignite.internal.util.typedef.internal.S; - -/** - * Record for storing baseline topology compact node ID to consistent node ID mapping. 
- */ -public class BaselineTopologyRecord extends WALRecord { - /** Id. */ - private int id; - - /** Compact ID to consistent ID mapping. */ - private Map mapping; - - /** - * Default constructor. - */ - private BaselineTopologyRecord() { - // No-op, used from factory methods. - } - - /** - * @param id Baseline topology ID. - * @param mapping Compact ID to consistent ID mapping. - */ - public BaselineTopologyRecord(int id, Map mapping) { - this.id = id; - this.mapping = mapping; - } - - /** {@inheritDoc} */ - @Override public RecordType type() { - return RecordType.BASELINE_TOP_RECORD; - } - - /** - * Returns baseline topology ID. - * - * @return Baseline topology ID. - */ - public int id() { - return id; - } - - /** - * Returns mapping. - * - * @return Compact ID to consistent ID mapping. - */ - public Map mapping() { - return mapping; - } - - /** {@inheritDoc} */ - @Override public String toString() { - return S.toString(BaselineTopologyRecord.class, this, "super", super.toString()); - } -} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java index 3511affe5d531..d13a68ae518cc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataEntry.java @@ -157,6 +157,18 @@ public long partitionCounter() { return partCnt; } + /** + * Sets partition update counter to entry. + * + * @param partCnt Partition update counter. + * @return {@code this} for chaining. + */ + public DataEntry partitionCounter(long partCnt) { + this.partCnt = partCnt; + + return this; + } + /** * @return Expire time. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataRecord.java index 7a4d6b8793a21..d5ab53a7226c2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/DataRecord.java @@ -76,6 +76,13 @@ public DataRecord(List writeEntries, long timestamp) { this.writeEntries = writeEntries; } + /** + * @param writeEntries Write entries. + */ + public void setWriteEntries(List writeEntries) { + this.writeEntries = writeEntries; + } + /** * @return Collection of write entries. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/LazyDataEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/LazyDataEntry.java index 0ad87d7bf9c8e..6b56da5b7b3e5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/LazyDataEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/LazyDataEntry.java @@ -96,6 +96,9 @@ public LazyDataEntry( IgniteCacheObjectProcessor co = cctx.kernalContext().cacheObjects(); key = co.toKeyCacheObject(cacheCtx.cacheObjectContext(), keyType, keyBytes); + + if (key.partition() == -1) + key.partition(partId); } return key; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MemoryRecoveryRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MemoryRecoveryRecord.java index 8843eeedef90e..5a48b340d0ca4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MemoryRecoveryRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MemoryRecoveryRecord.java @@ -20,11 +20,11 @@ import org.apache.ignite.internal.util.typedef.internal.S; /** - * Marker that we start memory 
recovering + * Marker indicates that binary memory recovery has finished. */ public class MemoryRecoveryRecord extends WALRecord { /** Create timestamp, millis */ - private long time; + private final long time; /** * Default constructor. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MetastoreDataRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MetastoreDataRecord.java index e269de2adc012..9e734244c7b84 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MetastoreDataRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/MetastoreDataRecord.java @@ -18,13 +18,14 @@ package org.apache.ignite.internal.pagemem.wal.record; +import org.apache.ignite.internal.processors.cache.persistence.metastorage.MetaStorage; import org.apache.ignite.internal.util.typedef.internal.S; import org.jetbrains.annotations.Nullable; /** * */ -public class MetastoreDataRecord extends WALRecord { +public class MetastoreDataRecord extends WALRecord implements WalRecordCacheGroupAware { /** */ private final String key; @@ -59,4 +60,9 @@ public String key() { @Override public String toString() { return S.toString(MetastoreDataRecord.class, this, "super", super.toString()); } + + /** {@inheritDoc} */ + @Override public int groupId() { + return MetaStorage.METASTORAGE_CACHE_ID; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java index 1aa065e10df40..8957d9b6ec27b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/PageSnapshot.java @@ -19,8 +19,6 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; -import java.util.Arrays; -import org.apache.ignite.IgniteCheckedException; 
import org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.util.GridUnsafe; @@ -92,10 +90,6 @@ public FullPageId fullPageId() { + "],\nsuper = [" + super.toString() + "]]"; } - catch (IgniteCheckedException ignored) { - return "Error during call'toString' of PageSnapshot [fullPageId=" + fullPageId() + - ", pageData = " + Arrays.toString(pageData) + ", super=" + super.toString() + "]"; - } finally { GridUnsafe.cleanDirectBuffer(buf); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/RollbackRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/RollbackRecord.java new file mode 100644 index 0000000000000..fd6a00ecb9e66 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/RollbackRecord.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record; + +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Rollback record. 
Used to close gap in partition update sequence on tx rollback. + */ +public class RollbackRecord extends WALRecord { + /** Cache ID. */ + @GridToStringInclude + protected int grpId; + + /** Partition ID. */ + @GridToStringInclude + protected int partId; + + /** Rollback start. */ + @GridToStringInclude + protected long start; + + /** Rollback range. */ + @GridToStringInclude + protected long range; + + /** + * @param grpId Group id. + * @param partId Partition id. + * @param start Start. + * @param range Range. + */ + public RollbackRecord(int grpId, int partId, long start, long range) { + this.grpId = grpId; + this.partId = partId; + this.start = start; + this.range = range; + } + + /** + * @return Cache ID. + */ + public int groupId() { + return grpId; + } + + /** + * @return Partition ID. + */ + public int partitionId() { + return partId; + } + + /** + * @return Rollback start. + */ + public long start() { + return start; + } + + /** + * @return Rollback range. + */ + public long range() { + return range; + } + + /** + * Returns a number of overlapping update counters. + * + * @param from From counter (not inclusive). + * @param to To counter (inclusive). 
+ */ + public long overlap(long from, long to) { + long to0 = start + range; + + // from lies within (start, to0] + if (start <= from && from < to0) + return Math.min(to0 - from, to - from); + + // start lies within (from, to] + if (from <= start && start < to) + return Math.min(to - start, range); + + return 0; + } + + + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.ROLLBACK_TX_RECORD; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(RollbackRecord.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java index 4fae179b63d28..87ca9d689f9c4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/WALRecord.java @@ -32,157 +32,202 @@ public abstract class WALRecord { */ public enum RecordType { /** */ - TX_RECORD, + TX_RECORD (0), /** */ - PAGE_RECORD, + PAGE_RECORD (1), /** */ - DATA_RECORD, + DATA_RECORD (2), /** Checkpoint (begin) record */ - CHECKPOINT_RECORD, + CHECKPOINT_RECORD (3), /** WAL segment header record. */ - HEADER_RECORD, + HEADER_RECORD (4), // Delta records. 
/** */ - INIT_NEW_PAGE_RECORD, + INIT_NEW_PAGE_RECORD (5), /** */ - DATA_PAGE_INSERT_RECORD, + DATA_PAGE_INSERT_RECORD (6), /** */ - DATA_PAGE_INSERT_FRAGMENT_RECORD, + DATA_PAGE_INSERT_FRAGMENT_RECORD (7), /** */ - DATA_PAGE_REMOVE_RECORD, + DATA_PAGE_REMOVE_RECORD (8), /** */ - DATA_PAGE_SET_FREE_LIST_PAGE, + DATA_PAGE_SET_FREE_LIST_PAGE (9), /** */ - BTREE_META_PAGE_INIT_ROOT, + BTREE_META_PAGE_INIT_ROOT (10), /** */ - BTREE_META_PAGE_ADD_ROOT, + BTREE_META_PAGE_ADD_ROOT (11), /** */ - BTREE_META_PAGE_CUT_ROOT, + BTREE_META_PAGE_CUT_ROOT (12), /** */ - BTREE_INIT_NEW_ROOT, + BTREE_INIT_NEW_ROOT (13), /** */ - BTREE_PAGE_RECYCLE, + BTREE_PAGE_RECYCLE (14), /** */ - BTREE_PAGE_INSERT, + BTREE_PAGE_INSERT (15), /** */ - BTREE_FIX_LEFTMOST_CHILD, + BTREE_FIX_LEFTMOST_CHILD (16), /** */ - BTREE_FIX_COUNT, + BTREE_FIX_COUNT (17), /** */ - BTREE_PAGE_REPLACE, + BTREE_PAGE_REPLACE (18), /** */ - BTREE_PAGE_REMOVE, + BTREE_PAGE_REMOVE (19), /** */ - BTREE_PAGE_INNER_REPLACE, + BTREE_PAGE_INNER_REPLACE (20), /** */ - BTREE_FIX_REMOVE_ID, + BTREE_FIX_REMOVE_ID (21), /** */ - BTREE_FORWARD_PAGE_SPLIT, + BTREE_FORWARD_PAGE_SPLIT (22), /** */ - BTREE_EXISTING_PAGE_SPLIT, + BTREE_EXISTING_PAGE_SPLIT (23), /** */ - BTREE_PAGE_MERGE, + BTREE_PAGE_MERGE (24), /** */ - PAGES_LIST_SET_NEXT, + PAGES_LIST_SET_NEXT (25), /** */ - PAGES_LIST_SET_PREVIOUS, + PAGES_LIST_SET_PREVIOUS (26), /** */ - PAGES_LIST_INIT_NEW_PAGE, + PAGES_LIST_INIT_NEW_PAGE (27), /** */ - PAGES_LIST_ADD_PAGE, + PAGES_LIST_ADD_PAGE (28), /** */ - PAGES_LIST_REMOVE_PAGE, + PAGES_LIST_REMOVE_PAGE (29), /** */ - META_PAGE_INIT, + META_PAGE_INIT (30), /** */ - PARTITION_META_PAGE_UPDATE_COUNTERS, + PARTITION_META_PAGE_UPDATE_COUNTERS (31), /** Memory recovering start marker */ - MEMORY_RECOVERY, + MEMORY_RECOVERY (32), /** */ - TRACKING_PAGE_DELTA, + TRACKING_PAGE_DELTA (33), /** Meta page update last successful snapshot id. 
*/ - META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID (34), /** Meta page update last successful full snapshot id. */ - META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID, + META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID (35), /** Meta page update next snapshot id. */ - META_PAGE_UPDATE_NEXT_SNAPSHOT_ID, + META_PAGE_UPDATE_NEXT_SNAPSHOT_ID (36), /** Meta page update last allocated index. */ - META_PAGE_UPDATE_LAST_ALLOCATED_INDEX, + META_PAGE_UPDATE_LAST_ALLOCATED_INDEX (37), /** Partition meta update state. */ - PART_META_UPDATE_STATE, + PART_META_UPDATE_STATE (38), /** Page list meta reset count record. */ - PAGE_LIST_META_RESET_COUNT_RECORD, + PAGE_LIST_META_RESET_COUNT_RECORD (39), - /** Switch segment record. */ - SWITCH_SEGMENT_RECORD, + /** Switch segment record. + * Marker record for indicate end of segment. + * If the next one record is written down exactly at the end of segment, + * SWITCH_SEGMENT_RECORD will not be written, if not then it means that we have more + * that one byte in the end,then we write SWITCH_SEGMENT_RECORD as marker end of segment. + * No need write CRC or WAL pointer for this record. It is byte marker record. + * */ + SWITCH_SEGMENT_RECORD (40), /** */ - DATA_PAGE_UPDATE_RECORD, + DATA_PAGE_UPDATE_RECORD (41), /** init */ - BTREE_META_PAGE_INIT_ROOT2, + BTREE_META_PAGE_INIT_ROOT2 (42), /** Partition destroy. */ - PARTITION_DESTROY, + PARTITION_DESTROY (43), /** Snapshot record. */ - SNAPSHOT, + SNAPSHOT (44), /** Metastore data record. */ - METASTORE_DATA_RECORD, + METASTORE_DATA_RECORD (45), /** Exchange record. */ - EXCHANGE, + EXCHANGE (46), - /** Baseline topology record. */ - BASELINE_TOP_RECORD; + /** Reserved for future record. */ + RESERVED (47), + + /** Rollback tx record. 
*/ + ROLLBACK_TX_RECORD (57), + + /** */ + PARTITION_META_PAGE_UPDATE_COUNTERS_V2 (58), + + /** Init root meta page (with flags and created version) */ + BTREE_META_PAGE_INIT_ROOT_V3 (59); + + /** Index for serialization. Should be consistent throughout all versions. */ + private final int idx; + + /** + * @param idx Index for serialization. + */ + RecordType(int idx) { + this.idx = idx; + } + + /** + * @return Index for serialization. + */ + public int index() { + return idx; + } /** */ - private static final RecordType[] VALS = RecordType.values(); + private static final RecordType[] VALS; + + static { + RecordType[] recordTypes = RecordType.values(); + + int maxIdx = 0; + for (RecordType recordType : recordTypes) + maxIdx = Math.max(maxIdx, recordType.idx); + + VALS = new RecordType[maxIdx + 1]; + + for (RecordType recordType : recordTypes) + VALS[recordType.idx] = recordType; + } /** */ - public static RecordType fromOrdinal(int ord) { - return ord < 0 || ord >= VALS.length ? null : VALS[ord]; + public static RecordType fromIndex(int idx) { + return idx < 0 || idx >= VALS.length ? 
null : VALS[idx]; } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageInsertFragmentRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageInsertFragmentRecord.java index 5324d5605d72d..2b02bb5748fdb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageInsertFragmentRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/DataPageInsertFragmentRecord.java @@ -57,7 +57,7 @@ public DataPageInsertFragmentRecord( @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { AbstractDataPageIO io = PageIO.getPageIO(pageAddr); - io.addRowFragment(pageAddr, payload, lastLink, pageMem.pageSize()); + io.addRowFragment(PageIO.getPageId(pageAddr), pageAddr, payload, lastLink, pageMem.pageSize()); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/InitNewPageRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/InitNewPageRecord.java index c177a04b6efca..1169351979400 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/InitNewPageRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/InitNewPageRecord.java @@ -40,7 +40,7 @@ public class InitNewPageRecord extends PageDeltaRecord { /** * @param grpId Cache group ID. - * @param pageId Page ID. + * @param pageId Page ID. * @param ioType IO type. * @param ioVer IO version. * @param newPageId New page ID. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRecord.java index ca995bf4e1f8d..822daca772971 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRecord.java @@ -19,7 +19,6 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.internal.pagemem.PageMemory; -import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PageMetaIO; import org.apache.ignite.internal.util.typedef.internal.S; @@ -46,8 +45,6 @@ public class MetaPageInitRecord extends InitNewPageRecord { public MetaPageInitRecord(int grpId, long pageId, int ioType, int ioVer, long treeRoot, long reuseListRoot) { super(grpId, pageId, ioType, ioVer, pageId); - assert ioType == PageIO.T_META || ioType == PageIO.T_PART_META; - this.treeRoot = treeRoot; this.reuseListRoot = reuseListRoot; this.ioType = ioType; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java new file mode 100644 index 0000000000000..1163f1f7e6d05 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageInitRootInlineFlagsCreatedVersionRecord.java @@ -0,0 +1,98 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.IgniteVersionUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.BPlusMetaIO; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteProductVersion; + +/** + * + */ +public class MetaPageInitRootInlineFlagsCreatedVersionRecord extends MetaPageInitRootInlineRecord { + /** Created version. */ + private final long flags; + + /** Created version. */ + private final IgniteProductVersion createdVer; + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize) { + super(grpId, pageId, rootId, inlineSize); + + createdVer = IgniteVersionUtils.VER; + flags = BPlusMetaIO.FLAGS_DEFAULT; + } + + /** + * @param grpId Cache group ID. + * @param pageId Meta page ID. + * @param rootId Root id. + * @param inlineSize Inline size. + * @param flags Flags. + * @param createdVer The version of ignite that creates this tree. 
+ */ + public MetaPageInitRootInlineFlagsCreatedVersionRecord(int grpId, long pageId, long rootId, int inlineSize, + long flags, IgniteProductVersion createdVer) { + super(grpId, pageId, rootId, inlineSize); + + this.flags = flags; + this.createdVer = createdVer; + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + BPlusMetaIO io = BPlusMetaIO.VERSIONS.forPage(pageAddr); + + io.initFlagsAndVersion(pageAddr, flags, createdVer); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.BTREE_META_PAGE_INIT_ROOT_V3; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageInitRootInlineFlagsCreatedVersionRecord.class, this, "super", super.toString()); + } + + /** + * @return Created version. + */ + public IgniteProductVersion createdVersion() { + return createdVer; + } + + /** + * @return Meta page flags. 
+ */ + public long flags() { + return flags; + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateLastAllocatedIndex.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateLastAllocatedIndex.java index 39f6a0335b1bd..324227be6fc4c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateLastAllocatedIndex.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateLastAllocatedIndex.java @@ -41,9 +41,11 @@ public MetaPageUpdateLastAllocatedIndex(int grpId, long pageId, int lastAllocate /** {@inheritDoc} */ @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { - assert PageIO.getType(pageAddr) == PageIO.T_META || PageIO.getType(pageAddr) == PageIO.T_PART_META; + int type = PageIO.getType(pageAddr); - PageMetaIO io = PageMetaIO.VERSIONS.forVersion(PageIO.getVersion(pageAddr)); + assert type == PageIO.T_META || type == PageIO.T_PART_META; + + PageMetaIO io = PageIO.getPageIO(type, PageIO.getVersion(pageAddr)); io.setLastAllocatedPageCount(pageAddr, lastAllocatedIdx); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateNextSnapshotId.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateNextSnapshotId.java index 2046ecd274d95..5068fe5b45ec5 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateNextSnapshotId.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdateNextSnapshotId.java @@ -27,22 +27,22 @@ */ public class MetaPageUpdateNextSnapshotId extends PageDeltaRecord { /** */ - private final long nextSnapshotId; + private final long nextSnapshotTag; /** * @param pageId Meta page ID. 
*/ - public MetaPageUpdateNextSnapshotId(int grpId, long pageId, long nextSnapshotId) { + public MetaPageUpdateNextSnapshotId(int grpId, long pageId, long nextSnapshotTag) { super(grpId, pageId); - this.nextSnapshotId = nextSnapshotId; + this.nextSnapshotTag = nextSnapshotTag; } /** {@inheritDoc} */ @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { PageMetaIO io = PageMetaIO.VERSIONS.forPage(pageAddr); - io.setNextSnapshotTag(pageAddr, nextSnapshotId); + io.setNextSnapshotTag(pageAddr, nextSnapshotTag); } /** {@inheritDoc} */ @@ -54,7 +54,7 @@ public MetaPageUpdateNextSnapshotId(int grpId, long pageId, long nextSnapshotId) * @return Root ID. */ public long nextSnapshotId() { - return nextSnapshotId; + return nextSnapshotTag; } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecord.java index bafbf475abe9f..3e2b67bd3c538 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecord.java @@ -17,7 +17,11 @@ package org.apache.ignite.internal.pagemem.wal.record.delta; +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; import org.apache.ignite.internal.util.typedef.internal.S; @@ -32,7 +36,7 @@ public class MetaPageUpdatePartitionDataRecord extends PageDeltaRecord { /** */ private long globalRmvId; - /** */ + /** TODO: Partition size may be long */ 
private int partSize; /** */ @@ -55,9 +59,9 @@ public MetaPageUpdatePartitionDataRecord( long updateCntr, long globalRmvId, int partSize, - long cntrsPageId, byte state, - int allocatedIdxCandidate - ) { + long cntrsPageId, + byte state, + int allocatedIdxCandidate) { super(grpId, pageId); this.updateCntr = updateCntr; @@ -68,6 +72,20 @@ public MetaPageUpdatePartitionDataRecord( this.cntrsPageId = cntrsPageId; } + /** + * @param in Input. + */ + public MetaPageUpdatePartitionDataRecord(DataInput in) throws IOException{ + super(in.readInt(), in.readLong()); + + this.updateCntr = in.readLong(); + this.globalRmvId = in.readLong(); + this.partSize = in.readInt(); + this.cntrsPageId = in.readLong(); + this.state = in.readByte(); + this.allocatedIdxCandidate = in.readInt(); + } + /** * @return Update counter. */ @@ -122,6 +140,21 @@ public int allocatedIndexCandidate() { return allocatedIdxCandidate; } + /** + * @param buf Buffer. + */ + public void toBytes(ByteBuffer buf) { + buf.putInt(groupId()); + buf.putLong(pageId()); + + buf.putLong(updateCounter()); + buf.putLong(globalRemoveId()); + buf.putInt(partitionSize()); + buf.putLong(countersPageId()); + buf.put(state()); + buf.putInt(allocatedIndexCandidate()); + } + /** {@inheritDoc} */ @Override public RecordType type() { return RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS; @@ -129,6 +162,6 @@ public int allocatedIndexCandidate() { /** {@inheritDoc} */ @Override public String toString() { - return S.toString(MetaPageUpdatePartitionDataRecord.class, this, "super", super.toString()); + return S.toString(MetaPageUpdatePartitionDataRecord.class, this, "partId", PageIdUtils.partId(pageId()), "super", super.toString()); } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV2.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV2.java new file mode 100644 index 
0000000000000..ab3ccf8f35a37 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/MetaPageUpdatePartitionDataRecordV2.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.pagemem.wal.record.delta; + +import java.io.DataInput; +import java.io.IOException; +import java.nio.ByteBuffer; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.internal.pagemem.PageIdUtils; +import org.apache.ignite.internal.pagemem.PageMemory; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIO; +import org.apache.ignite.internal.processors.cache.persistence.tree.io.PagePartitionMetaIOV2; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * + */ +public class MetaPageUpdatePartitionDataRecordV2 extends MetaPageUpdatePartitionDataRecord { + /** */ + private long link; + + /** + * @param grpId Group id. + * @param pageId Page id. + * @param updateCntr Update counter. + * @param globalRmvId Global remove id. + * @param partSize Partition size. + * @param cntrsPageId Cntrs page id. + * @param state State. + * @param allocatedIdxCandidate Allocated index candidate. 
+ * @param link Link. + */ + public MetaPageUpdatePartitionDataRecordV2( + int grpId, + long pageId, + long updateCntr, + long globalRmvId, + int partSize, + long cntrsPageId, + byte state, + int allocatedIdxCandidate, + long link) { + super(grpId, pageId, updateCntr, globalRmvId, partSize, cntrsPageId, state, allocatedIdxCandidate); + this.link = link; + } + + /** + * @param in Input. + */ + public MetaPageUpdatePartitionDataRecordV2(DataInput in) throws IOException { + super(in); + + this.link = in.readLong(); + } + + /** {@inheritDoc} */ + @Override public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException { + super.applyDelta(pageMem, pageAddr); + + PagePartitionMetaIOV2 io = (PagePartitionMetaIOV2)PagePartitionMetaIO.VERSIONS.forPage(pageAddr); + + io.setGapsLink(pageAddr, link); + } + + /** + * + */ + public long link() { + return link; + } + + /** {@inheritDoc} */ + @Override public void toBytes(ByteBuffer buf) { + super.toBytes(buf); + + buf.putLong(link()); + } + + /** {@inheritDoc} */ + @Override public RecordType type() { + return RecordType.PARTITION_META_PAGE_UPDATE_COUNTERS_V2; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(MetaPageUpdatePartitionDataRecordV2.class, this, "partId", PageIdUtils.partId(pageId()), "super", super.toString()); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PageDeltaRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PageDeltaRecord.java index a70907a053a10..6247558e1f250 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PageDeltaRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PageDeltaRecord.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.pagemem.wal.record.delta; import org.apache.ignite.IgniteCheckedException; +import 
org.apache.ignite.internal.pagemem.FullPageId; import org.apache.ignite.internal.pagemem.PageMemory; import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.WalRecordCacheGroupAware; @@ -52,6 +53,13 @@ public long pageId() { return pageId; } + /** + * @return Full page ID. + */ + public FullPageId fullPageId() { + return new FullPageId(pageId, grpId); + } + /** {@inheritDoc} */ @Override public int groupId() { return grpId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PartitionMetaStateRecord.java b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PartitionMetaStateRecord.java index a89f7be04b406..d8bcbdaa1f477 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PartitionMetaStateRecord.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/pagemem/wal/record/delta/PartitionMetaStateRecord.java @@ -19,7 +19,7 @@ import org.apache.ignite.internal.pagemem.wal.record.WALRecord; import org.apache.ignite.internal.pagemem.wal.record.WalRecordCacheGroupAware; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.util.typedef.internal.S; /** @@ -35,12 +35,14 @@ public class PartitionMetaStateRecord extends WALRecord implements WalRecordCach /** Partition id. */ private final int partId; - /** Update counter. */ + /** @deprecated Update counter. */ private final long updateCounter; /** * @param grpId Cache group ID. - * @param state Page ID. + * @param partId Partition ID. + * @param state State. + * @param updateCounter Update counter. 
*/ public PartitionMetaStateRecord(int grpId, int partId, GridDhtPartitionState state, long updateCounter) { this.grpId = grpId; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java index f78ab603a1232..3557a24fa161a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityAssignment.java @@ -19,10 +19,13 @@ import org.apache.ignite.cluster.ClusterNode; -import java.util.HashSet; +import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.UUID; +import org.apache.ignite.IgniteSystemProperties; +import org.apache.ignite.internal.util.typedef.internal.U; /** * Cached affinity calculations. @@ -34,6 +37,18 @@ public interface AffinityAssignment { */ public boolean clientEventChange(); + /** Size threshold to use Map instead of List view. */ + int IGNITE_AFFINITY_BACKUPS_THRESHOLD = IgniteSystemProperties.getInteger( + IgniteSystemProperties.IGNITE_AFFINITY_BACKUPS_THRESHOLD, + 5 + ); + + /** Disable memory affinity optimizations. */ + boolean IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION = IgniteSystemProperties.getBoolean( + IgniteSystemProperties.IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION, + false + ); + /** * @return Affinity assignment computed by affinity function. */ @@ -63,7 +78,7 @@ public interface AffinityAssignment { * @param part Partition. * @return Affinity nodes IDs. */ - public HashSet getIds(int part); + public Collection getIds(int part); /** * @return Nodes having parimary and backup assignments. @@ -90,4 +105,18 @@ public interface AffinityAssignment { * @return Backup partitions for specified node ID. 
*/ public Set backupPartitions(UUID nodeId); -} \ No newline at end of file + + /** + * Converts List of Cluster Nodes to HashSet of UUIDs wrapped as unmodifiable collection. + * @param assignmentPart Source assignment per partition. + * @return List of deduplicated collections if ClusterNode's ids. + */ + public default Collection assignments2ids(List assignmentPart) { + Collection partIds = U.newHashSet(assignmentPart.size()); + + for (ClusterNode node : assignmentPart) + partIds.add(node.id()); + + return Collections.unmodifiableCollection(partIds); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java index 44b27534dee62..3b9119b6d5b92 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/AffinityTopologyVersion.java @@ -112,6 +112,31 @@ public int minorTopologyVersion() { return cmp; } + /** + * @param lower Lower bound. + * @param upper Upper bound. + * @return {@code True} if this topology version is within provided bounds (inclusive). + */ + public final boolean isBetween(AffinityTopologyVersion lower, AffinityTopologyVersion upper) { + return compareTo(lower) >= 0 && compareTo(upper) <= 0; + } + + /** + * @param topVer Test version. + * @return {@code True} if this topology happens strictly after than {@code topVer}. + */ + public final boolean after(AffinityTopologyVersion topVer) { + return compareTo(topVer) > 0; + } + + /** + * @param topVer Test version. + * @return {@code True} if this topology happens strictly before than {@code topVer}. + */ + public final boolean before(AffinityTopologyVersion topVer) { + return compareTo(topVer) < 0; + } + /** {@inheritDoc} */ @Override public void onAckReceived() { // No-op. 
@@ -219,4 +244,4 @@ public int minorTopologyVersion() { @Override public String toString() { return S.toString(AffinityTopologyVersion.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java index 6da6aaa5c2d6d..d40b162137a47 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignment.java @@ -31,7 +31,11 @@ /** * Cached affinity calculations. + * + * Deprecated GridAffinityAssignment doesn't support versioning. + * Use GridAffinityAssignmentV2 instead. */ +@Deprecated @SuppressWarnings("ForLoopReplaceableByForEach") public class GridAffinityAssignment implements AffinityAssignment, Serializable { /** */ @@ -71,8 +75,8 @@ public class GridAffinityAssignment implements AffinityAssignment, Serializable */ GridAffinityAssignment(AffinityTopologyVersion topVer) { this.topVer = topVer; - primary = new HashMap<>(); - backup = new HashMap<>(); + primary = Collections.emptyMap(); + backup = Collections.emptyMap(); clientEvtChange = false; } @@ -91,10 +95,10 @@ public class GridAffinityAssignment implements AffinityAssignment, Serializable this.topVer = topVer; this.assignment = assignment; this.idealAssignment = idealAssignment.equals(assignment) ? assignment : idealAssignment; + clientEvtChange = false; primary = new HashMap<>(); backup = new HashMap<>(); - clientEvtChange = false; initPrimaryBackupMaps(); } @@ -125,21 +129,21 @@ public boolean clientEventChange() { /** * @return Affinity assignment computed by affinity function. */ - public List> idealAssignment() { + @Override public List> idealAssignment() { return idealAssignment; } /** * @return Affinity assignment. 
*/ - public List> assignment() { + @Override public List> assignment() { return assignment; } /** * @return Topology version. */ - public AffinityTopologyVersion topologyVersion() { + @Override public AffinityTopologyVersion topologyVersion() { return topVer; } @@ -149,7 +153,7 @@ public AffinityTopologyVersion topologyVersion() { * @param part Partition. * @return Affinity nodes. */ - public List get(int part) { + @Override public List get(int part) { assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + " [part=" + part + ", partitions=" + assignment.size() + ']'; @@ -162,7 +166,7 @@ public List get(int part) { * @param part Partition. * @return Affinity nodes IDs. */ - public HashSet getIds(int part) { + @Override public HashSet getIds(int part) { assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + " [part=" + part + ", partitions=" + assignment.size() + ']'; @@ -196,7 +200,7 @@ public HashSet getIds(int part) { for (int p = 0; p < assignment.size(); p++) { List nodes = assignment.get(p); - if (nodes.size() > 0) + if (!nodes.isEmpty()) res.addAll(nodes); } @@ -216,7 +220,7 @@ public HashSet getIds(int part) { for (int p = 0; p < assignment.size(); p++) { List nodes = assignment.get(p); - if (nodes.size() > 0) + if (!nodes.isEmpty()) res.add(nodes.get(0)); } @@ -232,7 +236,7 @@ public HashSet getIds(int part) { * @param nodeId Node ID to get primary partitions for. * @return Primary partitions for specified node ID. */ - public Set primaryPartitions(UUID nodeId) { + @Override public Set primaryPartitions(UUID nodeId) { Set set = primary.get(nodeId); return set == null ? Collections.emptySet() : set; @@ -244,7 +248,7 @@ public Set primaryPartitions(UUID nodeId) { * @param nodeId Node ID to get backup partitions for. * @return Backup partitions for specified node ID. 
*/ - public Set backupPartitions(UUID nodeId) { + @Override public Set backupPartitions(UUID nodeId) { Set set = backup.get(nodeId); return set == null ? Collections.emptySet() : set; @@ -293,7 +297,7 @@ private void initPrimaryBackupMaps() { if (o == this) return true; - if (o == null || !(o instanceof AffinityAssignment)) + if (!(o instanceof AffinityAssignment)) return false; return topVer.equals(((AffinityAssignment)o).topologyVersion()); @@ -303,4 +307,4 @@ private void initPrimaryBackupMaps() { @Override public String toString() { return S.toString(GridAffinityAssignment.class, this, super.toString()); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java index 18edd028a2eef..6aee4b2b08967 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentCache.java @@ -48,6 +48,7 @@ import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.SB; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgnitePredicate; import org.jetbrains.annotations.Nullable; @@ -61,7 +62,10 @@ */ public class GridAffinityAssignmentCache { /** Cleanup history size. */ - private final int MAX_HIST_SIZE = getInteger(IGNITE_AFFINITY_HISTORY_SIZE, 500); + private final int MAX_HIST_SIZE = getInteger(IGNITE_AFFINITY_HISTORY_SIZE, 50); + + /** Cleanup history links size (calculated by both real entries and shallow copies). 
*/ + private final int MAX_HIST_LINKS_SIZE = MAX_HIST_SIZE * 10; /** Group name if specified or cache name. */ private final String cacheOrGrpName; @@ -94,7 +98,7 @@ public class GridAffinityAssignmentCache { private List> baselineAssignment; /** Cache item corresponding to the head topology version. */ - private final AtomicReference head; + private final AtomicReference head; /** Ready futures. */ private final ConcurrentMap readyFuts = new ConcurrentSkipListMap<>(); @@ -114,9 +118,6 @@ public class GridAffinityAssignmentCache { /** Node stop flag. */ private volatile IgniteCheckedException stopErr; - /** History size ignoring client events changes. */ - private final AtomicInteger histSize = new AtomicInteger(); - /** Full history size. */ private final AtomicInteger fullHistSize = new AtomicInteger(); @@ -162,7 +163,7 @@ public GridAffinityAssignmentCache(GridKernalContext ctx, partsCnt = aff.partitions(); affCache = new ConcurrentSkipListMap<>(); - head = new AtomicReference<>(new GridAffinityAssignment(AffinityTopologyVersion.NONE)); + head = new AtomicReference<>(new GridAffinityAssignmentV2(AffinityTopologyVersion.NONE)); similarAffKey = ctx.affinity().similaryAffinityKey(aff, nodeFilter, backups, partsCnt); @@ -198,11 +199,15 @@ public int groupId() { */ public void initialize(AffinityTopologyVersion topVer, List> affAssignment) { assert topVer.compareTo(lastVersion()) >= 0 : "[topVer = " + topVer + ", last=" + lastVersion() + ']'; + assert idealAssignment != null; - GridAffinityAssignment assignment = new GridAffinityAssignment(topVer, affAssignment, idealAssignment); + GridAffinityAssignmentV2 assignment = new GridAffinityAssignmentV2(topVer, affAssignment, idealAssignment); + + HistoryAffinityAssignmentImpl newHistEntry = new HistoryAffinityAssignmentImpl(assignment, backups); + + HistoryAffinityAssignment existing = affCache.put(topVer, newHistEntry); - affCache.put(topVer, new HistoryAffinityAssignment(assignment)); head.set(assignment); for 
(Map.Entry entry : readyFuts.entrySet()) { @@ -215,7 +220,13 @@ public void initialize(AffinityTopologyVersion topVer, List> a } } - onHistoryAdded(assignment); + onHistoryAdded(existing, newHistEntry); + + if (log.isTraceEnabled()) { + log.trace("New affinity assignment [grp=" + cacheOrGrpName + + ", topVer=" + topVer + + ", aff=" + fold(affAssignment) + "]"); + } } /** @@ -259,7 +270,9 @@ public void onReconnected() { affCache.clear(); - head.set(new GridAffinityAssignment(AffinityTopologyVersion.NONE)); + fullHistSize.set(0); + + head.set(new GridAffinityAssignmentV2(AffinityTopologyVersion.NONE)); stopErr = null; } @@ -315,7 +328,7 @@ public List> calculate( for (DiscoveryEvent event : events.events()) { boolean affinityNode = CU.affinityNode(event.eventNode(), nodeFilter); - if (affinityNode) { + if (affinityNode || event.type() == EVT_DISCOVERY_CUSTOM_EVT) { skipCalculation = false; break; @@ -427,14 +440,26 @@ private List> currentBaselineAssignment(AffinityTopologyVersio public void clientEventTopologyChange(DiscoveryEvent evt, AffinityTopologyVersion topVer) { assert topVer.compareTo(lastVersion()) >= 0 : "[topVer = " + topVer + ", last=" + lastVersion() + ']'; - GridAffinityAssignment aff = head.get(); + GridAffinityAssignmentV2 aff = head.get(); + + assert evt.type() == EVT_DISCOVERY_CUSTOM_EVT || aff.primaryPartitions(evt.eventNode().id()).isEmpty() : evt; - assert evt.type() == EVT_DISCOVERY_CUSTOM_EVT || aff.primaryPartitions(evt.eventNode().id()).isEmpty() : evt; - assert evt.type() == EVT_DISCOVERY_CUSTOM_EVT || aff.backupPartitions(evt.eventNode().id()).isEmpty() : evt; + assert evt.type() == EVT_DISCOVERY_CUSTOM_EVT || aff.backupPartitions(evt.eventNode().id()).isEmpty() : evt; - GridAffinityAssignment assignmentCpy = new GridAffinityAssignment(topVer, aff); + GridAffinityAssignmentV2 assignmentCpy = new GridAffinityAssignmentV2(topVer, aff); + + AffinityTopologyVersion prevVer = topVer.minorTopologyVersion() == 0 ? 
+ new AffinityTopologyVersion(topVer.topologyVersion() - 1, Integer.MAX_VALUE) : + new AffinityTopologyVersion(topVer.topologyVersion(), topVer.minorTopologyVersion() - 1); + + Map.Entry prevHistEntry = affCache.floorEntry(prevVer); + + HistoryAffinityAssignment newHistEntry = (prevHistEntry == null) ? + new HistoryAffinityAssignmentImpl(assignmentCpy, backups) : + new HistoryAffinityAssignmentShallowCopy(prevHistEntry.getValue().origin(), topVer); + + HistoryAffinityAssignment existing = affCache.put(topVer, newHistEntry); - affCache.put(topVer, new HistoryAffinityAssignment(assignmentCpy)); head.set(assignmentCpy); for (Map.Entry entry : readyFuts.entrySet()) { @@ -447,7 +472,7 @@ public void clientEventTopologyChange(DiscoveryEvent evt, AffinityTopologyVersio } } - onHistoryAdded(assignmentCpy); + onHistoryAdded(existing, newHistEntry); } /** @@ -462,7 +487,7 @@ public AffinityTopologyVersion lastVersion() { * @return Affinity assignment. */ public List> assignments(AffinityTopologyVersion topVer) { - AffinityAssignment aff = cachedAffinity(topVer); + AffinityAssignment aff = cachedAffinity(topVer, AffinityTopologyVersion.NONE); return aff.assignment(); } @@ -485,7 +510,7 @@ public List> readyAssignments(AffinityTopologyVersion topVer) * @return Future that will be completed after affinity for topology version {@code topVer} is calculated. */ @Nullable public IgniteInternalFuture readyFuture(AffinityTopologyVersion topVer) { - GridAffinityAssignment aff = head.get(); + GridAffinityAssignmentV2 aff = head.get(); if (aff.topologyVersion().compareTo(topVer) >= 0) { if (log.isDebugEnabled()) @@ -529,7 +554,7 @@ public int partitions() { */ public List nodes(int part, AffinityTopologyVersion topVer) { // Resolve cached affinity nodes. 
- return cachedAffinity(topVer).get(part); + return cachedAffinity(topVer, AffinityTopologyVersion.NONE).get(part); } /** @@ -540,7 +565,7 @@ public List nodes(int part, AffinityTopologyVersion topVer) { * @return Primary partitions for specified node ID. */ public Set primaryPartitions(UUID nodeId, AffinityTopologyVersion topVer) { - return cachedAffinity(topVer).primaryPartitions(nodeId); + return cachedAffinity(topVer, AffinityTopologyVersion.NONE).primaryPartitions(nodeId); } /** @@ -551,7 +576,7 @@ public Set primaryPartitions(UUID nodeId, AffinityTopologyVersion topVe * @return Backup partitions for specified node ID. */ public Set backupPartitions(UUID nodeId, AffinityTopologyVersion topVer) { - return cachedAffinity(topVer).backupPartitions(nodeId); + return cachedAffinity(topVer, AffinityTopologyVersion.NONE).backupPartitions(nodeId); } /** @@ -611,30 +636,69 @@ public AffinityAssignment readyAffinity(AffinityTopologyVersion topVer) { * @return Cached affinity. */ public AffinityAssignment cachedAffinity(AffinityTopologyVersion topVer) { + AffinityTopologyVersion lastAffChangeTopVer = + ctx.cache().context().exchange().lastAffinityChangedTopologyVersion(topVer); + + return cachedAffinity(topVer, lastAffChangeTopVer); + } + + /** + * Get cached affinity for specified topology version. + * + * @param topVer Topology version for which affinity assignment is requested. + * @param lastAffChangeTopVer Topology version of last affinity assignment change. + * @return Cached affinity. 
+ */ + public AffinityAssignment cachedAffinity( + AffinityTopologyVersion topVer, + AffinityTopologyVersion lastAffChangeTopVer + ) { if (topVer.equals(AffinityTopologyVersion.NONE)) - topVer = lastVersion(); - else - awaitTopologyVersion(topVer); + topVer = lastAffChangeTopVer = lastVersion(); + else { + if (lastAffChangeTopVer.equals(AffinityTopologyVersion.NONE)) + lastAffChangeTopVer = topVer; + + awaitTopologyVersion(lastAffChangeTopVer); + } assert topVer.topologyVersion() >= 0 : topVer; AffinityAssignment cache = head.get(); - if (!cache.topologyVersion().equals(topVer)) { - cache = affCache.get(topVer); + if (!(cache.topologyVersion().compareTo(lastAffChangeTopVer) >= 0 && + cache.topologyVersion().compareTo(topVer) <= 0)) { + + Map.Entry e = affCache.ceilingEntry(lastAffChangeTopVer); + + if (e != null) + cache = e.getValue(); if (cache == null) { + throw new IllegalStateException("Getting affinity for too old topology version that is already " + + "out of history [locNode=" + ctx.discovery().localNode() + + ", grp=" + cacheOrGrpName + + ", topVer=" + topVer + + ", lastAffChangeTopVer=" + lastAffChangeTopVer + + ", head=" + head.get().topologyVersion() + + ", history=" + affCache.keySet() + + ']'); + } + + if (cache.topologyVersion().compareTo(topVer) > 0) { throw new IllegalStateException("Getting affinity for topology version earlier than affinity is " + "calculated [locNode=" + ctx.discovery().localNode() + ", grp=" + cacheOrGrpName + ", topVer=" + topVer + + ", lastAffChangeTopVer=" + lastAffChangeTopVer + ", head=" + head.get().topologyVersion() + ", history=" + affCache.keySet() + ']'); } } - assert cache.topologyVersion().equals(topVer) : "Invalid cached affinity: " + cache; + assert cache.topologyVersion().compareTo(lastAffChangeTopVer) >= 0 && + cache.topologyVersion().compareTo(topVer) <= 0 : "Invalid cached affinity: [cache=" + cache + ", topVer=" + topVer + ", lastAffChangedTopVer=" + lastAffChangeTopVer + "]"; return cache; } @@ -690,7 +754,7 
@@ public void init(GridAffinityAssignmentCache aff) { * @param topVer Topology version to wait. */ private void awaitTopologyVersion(AffinityTopologyVersion topVer) { - GridAffinityAssignment aff = head.get(); + GridAffinityAssignmentV2 aff = head.get(); if (aff.topologyVersion().compareTo(topVer) >= 0) return; @@ -724,40 +788,70 @@ private void awaitTopologyVersion(AffinityTopologyVersion topVer) { } /** - * @param aff Added affinity assignment. + * Cleaning the affinity history. + * + * @param replaced Replaced entry in case history item was already present, null otherwise. + * @param added New history item. */ - private void onHistoryAdded(GridAffinityAssignment aff) { - int fullSize = fullHistSize.incrementAndGet(); + private void onHistoryAdded( + HistoryAffinityAssignment replaced, + HistoryAffinityAssignment added + ) { + boolean cleanupNeeded = false; - int size; + if (replaced == null) { + cleanupNeeded = true; - if (aff.clientEventChange()) - size = histSize.get(); - else - size = histSize.incrementAndGet(); - - int rmvCnt = size - MAX_HIST_SIZE; + if (added.requiresHistoryCleanup()) + fullHistSize.incrementAndGet(); + } + else { + if (replaced.requiresHistoryCleanup() != added.requiresHistoryCleanup()) { + if (added.requiresHistoryCleanup()) { + cleanupNeeded = true; - if (rmvCnt <= 0) { - if (fullSize > MAX_HIST_SIZE * 2) - rmvCnt = MAX_HIST_SIZE; + fullHistSize.incrementAndGet(); + } + else + fullHistSize.decrementAndGet(); + } } - if (rmvCnt > 0) { + if (!cleanupNeeded) + return; + + int fullSize = fullHistSize.get(); + + int linksSize = affCache.size(); + + int fullRmvCnt = fullSize > MAX_HIST_SIZE ? (MAX_HIST_SIZE / 2) : 0; + + int linksRmvCnt = linksSize > MAX_HIST_LINKS_SIZE ? 
(MAX_HIST_LINKS_SIZE / 2) : 0; + + if (fullRmvCnt > 0 || linksRmvCnt > 0) { Iterator it = affCache.values().iterator(); - while (it.hasNext() && rmvCnt > 0) { - AffinityAssignment aff0 = it.next(); + AffinityTopologyVersion topVerRmv = null; - it.remove(); + while (it.hasNext() && (fullRmvCnt > 0 || linksRmvCnt > 0)) { + HistoryAffinityAssignment aff0 = it.next(); + + if (aff0.requiresHistoryCleanup()) { // Don't decrement counter in case of fullHistoryCleanupRequired copy remove. + fullRmvCnt--; - rmvCnt--; + fullHistSize.decrementAndGet(); + } + + linksRmvCnt--; - if (!aff0.clientEventChange()) - histSize.decrementAndGet(); + it.remove(); - fullHistSize.decrementAndGet(); + topVerRmv = aff0.topologyVersion(); } + + topVerRmv = it.hasNext() ? it.next().topologyVersion() : topVerRmv; + + ctx.affinity().removeCachedAffinity(topVerRmv); } } @@ -768,6 +862,36 @@ public Collection cachedVersions() { return affCache.keySet(); } + /** + * @param affAssignment Affinity assignment. + * @return String representation of given {@code affAssignment}. + */ + private static String fold(List> affAssignment) { + SB sb = new SB(); + + for (int p = 0; p < affAssignment.size(); p++) { + sb.a("Part ["); + sb.a("id=" + p + ", "); + + SB partOwners = new SB(); + + List affOwners = affAssignment.get(p); + + for (ClusterNode node : affOwners) { + partOwners.a(node.consistentId()); + partOwners.a(' '); + } + + sb.a("owners=["); + sb.a(partOwners); + sb.a(']'); + + sb.a("] "); + } + + return sb.toString(); + } + /** * Affinity ready future. Will remove itself from ready futures map. 
*/ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentV2.java new file mode 100644 index 0000000000000..6630d493dff33 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityAssignmentV2.java @@ -0,0 +1,346 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.ignite.internal.processors.affinity; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.BitSetIntSet; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Cached affinity calculations V2. + * It supports adaptive usage of BitSets instead of HashSets. + */ +@SuppressWarnings("ForLoopReplaceableByForEach") +public class GridAffinityAssignmentV2 extends IgniteDataTransferObject implements AffinityAssignment { + /** */ + private static final long serialVersionUID = 0L; + + /** Topology version. */ + private AffinityTopologyVersion topVer; + + /** Collection of calculated affinity nodes. */ + private List> assignment; + + /** Map of primary node partitions. */ + private Map> primary; + + /** Map of backup node partitions. */ + private Map> backup; + + /** Assignment node IDs */ + private transient volatile List> assignmentIds; + + /** Nodes having primary or backup partition assignments. */ + private transient volatile Set nodes; + + /** Nodes having primary partitions assignments. */ + private transient volatile Set primaryPartsNodes; + + /** */ + private transient List> idealAssignment; + + /** */ + private transient boolean clientEvtChange; + + /** + * Default constructor for deserialization. + */ + public GridAffinityAssignmentV2() { + //No-op + } + + /** + * Constructs cached affinity calculations item. + * + * @param topVer Topology version. 
+ */ + GridAffinityAssignmentV2(AffinityTopologyVersion topVer) { + this.topVer = topVer; + primary = Collections.emptyMap(); + backup = Collections.emptyMap(); + clientEvtChange = false; + } + + /** + * @param topVer Topology version. + * @param assignment Assignment. + * @param idealAssignment Ideal assignment. + */ + public GridAffinityAssignmentV2(AffinityTopologyVersion topVer, + List> assignment, + List> idealAssignment + ) { + assert topVer != null; + assert assignment != null; + assert idealAssignment != null; + + this.topVer = topVer; + this.assignment = Collections.unmodifiableList(assignment); // It's important to keep equal references. + this.idealAssignment = + idealAssignment.equals(assignment) ? this.assignment : Collections.unmodifiableList(idealAssignment); + + clientEvtChange = false; + + // Temporary mirrors with modifiable partition's collections. + Map> tmpPrimary = new HashMap<>(); + Map> tmpBackup = new HashMap<>(); + boolean isPrimary; + + for (int partsCnt = assignment.size(), p = 0; p < partsCnt; p++) { + isPrimary = true; + + for (ClusterNode node : assignment.get(p)) { + UUID id = node.id(); + + Map> tmp = isPrimary ? tmpPrimary : tmpBackup; + + /* + https://issues.apache.org/jira/browse/IGNITE-4554 BitSet performs better than HashSet at most cases. + However with 65k partition and high number of nodes (700+) BitSet is loosing HashSet. + We need to replace it with sparse bitsets. + */ + tmp.computeIfAbsent(id, uuid -> + !IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION ? new BitSetIntSet() : new HashSet<>() + ).add(p); + + isPrimary = false; + } + } + + primary = Collections.unmodifiableMap(tmpPrimary); + backup = Collections.unmodifiableMap(tmpBackup); + } + + /** + * @param topVer Topology version. + * @param aff Assignment to copy from. 
+ */ + GridAffinityAssignmentV2(AffinityTopologyVersion topVer, GridAffinityAssignmentV2 aff) { + this.topVer = topVer; + + assignment = aff.assignment; + idealAssignment = aff.idealAssignment; + primary = aff.primary; + backup = aff.backup; + + clientEvtChange = true; + } + + /** {@inheritDoc} */ + @Override public boolean clientEventChange() { + return clientEvtChange; + } + + /** + * @return Unmodifiable ideal affinity assignment computed by affinity function. + */ + @Override public List> idealAssignment() { + return idealAssignment; + } + + /** + * @return Unmodifiable affinity assignment. + */ + @Override public List> assignment() { + return assignment; + } + + /** + * @return Topology version. + */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; + } + + /** + * Get affinity nodes for partition. + * + * @param part Partition. + * @return Affinity nodes. + */ + @Override public List get(int part) { + assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + + " [part=" + part + ", partitions=" + assignment.size() + ']'; + + return assignment.get(part); + } + + /** + * Get affinity node IDs for partition as unmodifiable collection. + * Depending on AFFINITY_BACKUPS_THRESHOLD we returned newly allocated HashSet or view on List. + * @param part Partition. + * @return Affinity nodes IDs. + */ + @Override public Collection getIds(int part) { + assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + + " [part=" + part + ", partitions=" + assignment.size() + ']'; + + if (IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION) + return getOrCreateAssignmentsIds(part); + else { + List nodes = assignment.get(part); + + return nodes.size() > GridAffinityAssignmentV2.IGNITE_AFFINITY_BACKUPS_THRESHOLD + ? getOrCreateAssignmentsIds(part) + : F.viewReadOnly(nodes, F.node2id()); + } + } + + /** + * + * @param part Partition ID. + * @return Collection of UUIDs. 
+ */ + private Collection getOrCreateAssignmentsIds(int part) { + List> assignmentIds0 = assignmentIds; + + if (assignmentIds0 == null) { + assignmentIds0 = new ArrayList<>(assignment.size()); + + for (List assignmentPart : assignment) + assignmentIds0.add(assignments2ids(assignmentPart)); + + assignmentIds = assignmentIds0; + } + + return assignmentIds0.get(part); + } + + /** {@inheritDoc} */ + @Override public Set nodes() { + Set res = nodes; + + if (res == null) { + res = new HashSet<>(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + if (!nodes.isEmpty()) + res.addAll(nodes); + } + + nodes = Collections.unmodifiableSet(res); + } + + return res; + } + + /** {@inheritDoc} */ + @Override public Set primaryPartitionNodes() { + Set res = primaryPartsNodes; + + if (res == null) { + res = new HashSet<>(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + if (!nodes.isEmpty()) + res.add(nodes.get(0)); + } + + primaryPartsNodes = Collections.unmodifiableSet(res); + } + + return res; + } + + /** + * Get primary partitions for specified node ID. + * + * @param nodeId Node ID to get primary partitions for. + * @return Primary partitions for specified node ID. + */ + @Override public Set primaryPartitions(UUID nodeId) { + Set set = primary.get(nodeId); + + return set == null ? Collections.emptySet() : Collections.unmodifiableSet(set); + } + + /** + * Get backup partitions for specified node ID. + * + * @param nodeId Node ID to get backup partitions for. + * @return Backup partitions for specified node ID. + */ + @Override public Set backupPartitions(UUID nodeId) { + Set set = backup.get(nodeId); + + return set == null ? 
Collections.emptySet() : Collections.unmodifiableSet(set); + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return topVer.hashCode(); + } + + /** {@inheritDoc} */ + @SuppressWarnings("SimplifiableIfStatement") + @Override public boolean equals(Object o) { + if (o == this) + return true; + + if (!(o instanceof AffinityAssignment)) + return false; + + return topVer.equals(((AffinityAssignment)o).topologyVersion()); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(GridAffinityAssignmentV2.class, this, super.toString()); + } + + /** {@inheritDoc} */ + @Override protected void writeExternalData(ObjectOutput out) throws IOException { + out.writeObject(topVer); + + U.writeCollection(out, assignment); + + U.writeMap(out, primary); + + U.writeMap(out, backup); + } + + /** {@inheritDoc} */ + @Override protected void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + topVer = (AffinityTopologyVersion)in.readObject(); + + assignment = U.readList(in); + + primary = U.readMap(in); + + backup = U.readMap(in); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java index 128eaf0699ef6..66f51efd06f83 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityProcessor.java @@ -26,9 +26,10 @@ import java.util.List; import java.util.Map; import java.util.Set; -import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentSkipListMap; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.cache.CacheMode; import 
org.apache.ignite.cache.affinity.Affinity; @@ -59,15 +60,14 @@ import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; +import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgnitePredicate; import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; -import java.util.concurrent.ConcurrentHashMap; import static org.apache.ignite.cache.CacheMode.LOCAL; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; -import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; import static org.apache.ignite.internal.GridClosureCallMode.BROADCAST; import static org.apache.ignite.internal.processors.affinity.GridAffinityUtils.affinityJob; @@ -86,15 +86,18 @@ public class GridAffinityProcessor extends GridProcessorAdapter { /** Time to wait between errors (in milliseconds). */ private static final long ERROR_WAIT = 500; + /** Log. */ + private final IgniteLogger log; + /** Affinity map. */ - private final ConcurrentMap> affMap = new ConcurrentHashMap<>(); + private final ConcurrentSkipListMap> affMap = new ConcurrentSkipListMap<>(); /** Listener. */ private final GridLocalEventListener lsnr = new GridLocalEventListener() { @Override public void onEvent(Event evt) { int evtType = evt.type(); - assert evtType == EVT_NODE_FAILED || evtType == EVT_NODE_LEFT || evtType == EVT_NODE_JOINED; + assert evtType == EVT_NODE_FAILED || evtType == EVT_NODE_LEFT; if (affMap.isEmpty()) return; // Skip empty affinity map. @@ -102,26 +105,24 @@ public class GridAffinityProcessor extends GridProcessorAdapter { final DiscoveryEvent discoEvt = (DiscoveryEvent)evt; // Clean up affinity functions if such cache no more exists. 
- if (evtType == EVT_NODE_FAILED || evtType == EVT_NODE_LEFT) { - final Collection caches = ctx.cache().cacheNames(); + final Collection caches = ctx.cache().cacheNames(); - final Collection rmv = new HashSet<>(); + final Collection rmv = new HashSet<>(); - for (AffinityAssignmentKey key : affMap.keySet()) { - if (!caches.contains(key.cacheName) || key.topVer.topologyVersion() < discoEvt.topologyVersion() - 10) - rmv.add(key); - } + for (AffinityAssignmentKey key : affMap.keySet()) { + if (!caches.contains(key.cacheName) || key.topVer.topologyVersion() < discoEvt.topologyVersion() - 10) + rmv.add(key); + } - if (!rmv.isEmpty()) { - ctx.timeout().addTimeoutObject( - new GridTimeoutObjectAdapter( - IgniteUuid.fromUuid(ctx.localNodeId()), - AFFINITY_MAP_CLEAN_UP_DELAY) { - @Override public void onTimeout() { - affMap.keySet().removeAll(rmv); - } - }); - } + if (!rmv.isEmpty()) { + ctx.timeout().addTimeoutObject( + new GridTimeoutObjectAdapter( + IgniteUuid.fromUuid(ctx.localNodeId()), + AFFINITY_MAP_CLEAN_UP_DELAY) { + @Override public void onTimeout() { + affMap.keySet().removeAll(rmv); + } + }); } } }; @@ -131,11 +132,13 @@ public class GridAffinityProcessor extends GridProcessorAdapter { */ public GridAffinityProcessor(GridKernalContext ctx) { super(ctx); + + log = ctx.log(GridAffinityProcessor.class); } /** {@inheritDoc} */ @Override public void start() throws IgniteCheckedException { - ctx.event().addLocalEventListener(lsnr, EVT_NODE_FAILED, EVT_NODE_LEFT, EVT_NODE_JOINED); + ctx.event().addLocalEventListener(lsnr, EVT_NODE_FAILED, EVT_NODE_LEFT); } /** {@inheritDoc} */ @@ -143,6 +146,11 @@ public GridAffinityProcessor(GridKernalContext ctx) { ctx.event().removeLocalEventListener(lsnr); } + /** {@inheritDoc} */ + @Override public void onDisconnected(IgniteFuture reconnectFut) throws IgniteCheckedException { + affMap.clear(); + } + /** * @param cacheName Cache name. * @param key Key. 
@@ -212,6 +220,34 @@ public int partition0(String cacheName, Object key, @Nullable AffinityInfo aff) return affInfo != null ? F.first(affInfo.assignment().get(partId)) : null; } + /** + * Removes cached affinity instances with affinity topology versions less than {@code topVer}. + * + * @param topVer topology version. + */ + public void removeCachedAffinity(AffinityTopologyVersion topVer) { + assert topVer != null; + + int oldSize = affMap.size(); + + Iterator>> it = + affMap.headMap(new AffinityAssignmentKey(topVer)).entrySet().iterator(); + + while (it.hasNext()) { + Map.Entry> entry = it.next(); + + assert entry.getValue() != null; + + if (!entry.getValue().isDone()) + continue; + + it.remove(); + } + + if (log.isDebugEnabled()) + log.debug("Affinity cached values were cleared: " + (oldSize - affMap.size())); + } + /** * Maps keys to nodes for given cache. @@ -358,6 +394,9 @@ private Map> keysToNodes(@Nullable final String c @SuppressWarnings("ErrorNotRethrown") @Nullable private AffinityInfo affinityCache(final String cacheName, AffinityTopologyVersion topVer) throws IgniteCheckedException { + + assert cacheName != null; + AffinityAssignmentKey key = new AffinityAssignmentKey(cacheName, topVer); IgniteInternalFuture fut = affMap.get(key); @@ -382,15 +421,13 @@ private Map> keysToNodes(@Nullable final String c } try { - GridAffinityAssignment assign = assign0 instanceof GridAffinityAssignment ? - (GridAffinityAssignment)assign0 : - new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment()); - + // using legacy GridAffinityAssignment for compatibility. 
AffinityInfo info = new AffinityInfo( cctx.config().getAffinity(), cctx.config().getAffinityMapper(), - assign, - cctx.cacheObjectContext()); + new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment()), + cctx.cacheObjectContext() + ); IgniteInternalFuture old = affMap.putIfAbsent(key, new GridFinishedFuture<>(info)); @@ -658,7 +695,7 @@ private GridAffinityAssignment assignment() { /** * */ - private static class AffinityAssignmentKey { + private static class AffinityAssignmentKey implements Comparable { /** */ private String cacheName; @@ -669,11 +706,20 @@ private static class AffinityAssignmentKey { * @param cacheName Cache name. * @param topVer Topology version. */ - private AffinityAssignmentKey(String cacheName, @NotNull AffinityTopologyVersion topVer) { + private AffinityAssignmentKey(@NotNull String cacheName, @NotNull AffinityTopologyVersion topVer) { this.cacheName = cacheName; this.topVer = topVer; } + /** + * Current constructor should be used only in removeCachedAffinity for creating of the special keys for removing. + * + * @param topVer Topology version. + */ + private AffinityAssignmentKey(@NotNull AffinityTopologyVersion topVer) { + this.topVer = topVer; + } + /** {@inheritDoc} */ @Override public boolean equals(Object o) { if (this == o) @@ -700,6 +746,32 @@ private AffinityAssignmentKey(String cacheName, @NotNull AffinityTopologyVersion @Override public String toString() { return S.toString(AffinityAssignmentKey.class, this); } + + /** {@inheritDoc} */ + @Override public int compareTo(AffinityAssignmentKey o) { + assert o != null; + + if (this == o) + return 0; + + int res = this.topVer.compareTo(o.topVer); + + // Key with null cache name must be less than any key with not null cache name for the same topVer. 
+ if (res == 0) { + if (cacheName == null && o.cacheName != null) + return -1; + + if (cacheName != null && o.cacheName == null) + return 1; + + if (cacheName == null && o.cacheName == null) + return 0; + + return cacheName.compareTo(o.cacheName); + } + + return res; + } } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java index abd5292799958..c43ab39ff349c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/GridAffinityUtils.java @@ -182,14 +182,12 @@ public AffinityJob() { AffinityAssignment assign0 = cctx.affinity().assignment(topVer); - GridAffinityAssignment assign = assign0 instanceof GridAffinityAssignment ? - (GridAffinityAssignment)assign0 : - new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment()); - + //using legacy GridAffinityAssignment for compatibility. 
return F.t( affinityMessage(ctx, cctx.config().getAffinity()), affinityMessage(ctx, cctx.config().getAffinityMapper()), - assign); + new GridAffinityAssignment(topVer, assign0.assignment(), assign0.idealAssignment()) + ); } /** {@inheritDoc} */ @@ -204,4 +202,4 @@ public AffinityJob() { topVer = (AffinityTopologyVersion)in.readObject(); } } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java index 94eaab4bf54b1..19bd88c17e6a0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignment.java @@ -1,184 +1,37 @@ /* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. 
+* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.ignite.internal.processors.affinity; -import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.internal.util.typedef.F; -import org.apache.ignite.internal.util.typedef.internal.S; -import org.apache.ignite.internal.util.typedef.internal.U; - -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.UUID; - /** - * + * Interface for historical calculated affinity assignment. */ -@SuppressWarnings("ForLoopReplaceableByForEach") -public class HistoryAffinityAssignment implements AffinityAssignment { - /** */ - private final AffinityTopologyVersion topVer; - - /** */ - private final List> assignment; - - /** */ - private final List> idealAssignment; - - /** */ - private final boolean clientEvtChange; - +public interface HistoryAffinityAssignment extends AffinityAssignment { /** - * @param assign Assignment. + * Should return true if instance is "heavy" and should be taken into account during history size management. + * + * @return true if adding this instance to history should trigger size check and possible cleanup. 
*/ - public HistoryAffinityAssignment(GridAffinityAssignment assign) { - this.topVer = assign.topologyVersion(); - this.assignment = assign.assignment(); - this.idealAssignment = assign.idealAssignment(); - this.clientEvtChange = assign.clientEventChange(); - } - - /** {@inheritDoc} */ - @Override public boolean clientEventChange() { - return clientEvtChange; - } - - /** {@inheritDoc} */ - @Override public List> idealAssignment() { - return idealAssignment; - } - - /** {@inheritDoc} */ - @Override public List> assignment() { - return assignment; - } - - /** {@inheritDoc} */ - @Override public AffinityTopologyVersion topologyVersion() { - return topVer; - } - - /** {@inheritDoc} */ - @Override public List get(int part) { - assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + - " [part=" + part + ", partitions=" + assignment.size() + ']'; - - return assignment.get(part); - } - - /** {@inheritDoc} */ - @Override public HashSet getIds(int part) { - assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + - " [part=" + part + ", partitions=" + assignment.size() + ']'; - - List nodes = assignment.get(part); - - HashSet ids = U.newHashSet(nodes.size()); - - for (int i = 0; i < nodes.size(); i++) - ids.add(nodes.get(i).id()); - - return ids; - } - - /** {@inheritDoc} */ - @Override public Set nodes() { - Set res = new HashSet<>(); - - for (int p = 0; p < assignment.size(); p++) { - List nodes = assignment.get(p); - - if (!F.isEmpty(nodes)) - res.addAll(nodes); - } + public boolean requiresHistoryCleanup(); - return res; - } - - /** {@inheritDoc} */ - @Override public Set primaryPartitionNodes() { - Set res = new HashSet<>(); - - for (int p = 0; p < assignment.size(); p++) { - List nodes = assignment.get(p); - - if (!F.isEmpty(nodes)) - res.add(nodes.get(0)); - } - - return res; - } - - /** {@inheritDoc} */ - @Override public Set primaryPartitions(UUID nodeId) { - Set res = new HashSet<>(); - - for (int p = 0; p 
< assignment.size(); p++) { - List nodes = assignment.get(p); - - if (!F.isEmpty(nodes) && nodes.get(0).id().equals(nodeId)) - res.add(p); - } - - return res; - } - - /** {@inheritDoc} */ - @Override public Set backupPartitions(UUID nodeId) { - Set res = new HashSet<>(); - - for (int p = 0; p < assignment.size(); p++) { - List nodes = assignment.get(p); - - for (int i = 1; i < nodes.size(); i++) { - ClusterNode node = nodes.get(i); - - if (node.id().equals(nodeId)) { - res.add(p); - - break; - } - } - } - - return res; - } - - /** {@inheritDoc} */ - @Override public int hashCode() { - return topVer.hashCode(); - } - - /** {@inheritDoc} */ - @SuppressWarnings("SimplifiableIfStatement") - @Override public boolean equals(Object o) { - if (o == this) - return true; - - if (o == null || !(o instanceof AffinityAssignment)) - return false; - - return topVer.equals(((AffinityAssignment)o).topologyVersion()); - } - - /** {@inheritDoc} */ - @Override public String toString() { - return S.toString(HistoryAffinityAssignment.class, this); - } + /** + * In case this instance is lightweight wrapper of another instance, this method should return reference + * to an original one. Otherwise, it should return this reference. + * + * @return Original instance of this if not applicable. + */ + public HistoryAffinityAssignment origin(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentImpl.java new file mode 100644 index 0000000000000..f3c0f31e53487 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentImpl.java @@ -0,0 +1,358 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.affinity; + +import java.util.AbstractList; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.util.BitSetIntSet; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Heap-space optimized version of calculated affinity assignment. + */ +@SuppressWarnings("ForLoopReplaceableByForEach") +public class HistoryAffinityAssignmentImpl implements HistoryAffinityAssignment { + /** */ + private final AffinityTopologyVersion topVer; + + /** */ + private final List> assignment; + + /** */ + private final List> idealAssignment; + + /** */ + private final ClusterNode[] nodes; + + /** Ideal assignments are stored as sequences of indexes in nodes array. */ + private final char[] idealParts; + + /** Diff with ideal. */ + private final Map assignmentDiff; + + /** + * @param assign Assignment. + * @param backups Backups. 
+ */ + public HistoryAffinityAssignmentImpl(AffinityAssignment assign, int backups) { + topVer = assign.topologyVersion(); + + if (IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION || backups > IGNITE_AFFINITY_BACKUPS_THRESHOLD) { + assignment = assign.assignment(); + + idealAssignment = assign.idealAssignment(); + + nodes = null; + + idealParts = null; + + assignmentDiff = null; + + return; + } + + List> assignment = assign.assignment(); + List> idealAssignment = assign.idealAssignment(); + + int min = Integer.MAX_VALUE; + int max = 0; + + for (List nodes : idealAssignment) { // Estimate required size. + int size = nodes.size(); + + if (size > max) + max = size; + + if (size < min) + min = size; + } + + if (max != min) { + this.assignment = assign.assignment(); + + this.idealAssignment = assign.idealAssignment(); + + nodes = null; + + idealParts = null; + + assignmentDiff = null; + + return; + } + + int cpys = max; + + boolean same = assignment == idealAssignment; + + int partsCnt = assignment.size(); + + idealParts = new char[partsCnt * cpys]; + + Map orderMap = new HashMap<>(); + + char order = 1; // Char type is used as unsigned short to avoid conversions. + + assignmentDiff = new HashMap<>(); + + for (int p = 0; p < assignment.size(); p++) { + List curr = assignment.get(p); + List ideal = idealAssignment.get(p); + + for (int i = 0; i < ideal.size(); i++) { + ClusterNode node = ideal.get(i); + + Character nodeOrder = orderMap.get(node); + + if (nodeOrder == null) + orderMap.put(node, (nodeOrder = order++)); + + idealParts[p * cpys + i] = nodeOrder; + } + + if (!same && !curr.equals(ideal)) { + char[] idx = new char[curr.size()]; + + assignmentDiff.put(p, idx); + + for (int i = 0; i < curr.size(); i++) { + ClusterNode node = curr.get(i); + + Character nodeOrder = orderMap.get(node); + + if (nodeOrder == null) + orderMap.put(node, (nodeOrder = order++)); + + idx[i] = nodeOrder; + } + } + } + + // Fill array according to assigned order. 
+ nodes = orderMap.keySet().stream().toArray(ClusterNode[]::new); + + Arrays.sort(nodes, (o1, o2) -> orderMap.get(o1).compareTo(orderMap.get(o2))); + + this.idealAssignment = new AbstractList>() { + @Override public List get(int idx) { + return partitionNodes(idx, true, cpys); + } + + @Override public int size() { + return partsCnt; + } + }; + + this.assignment = same ? this.idealAssignment : new AbstractList>() { + @Override public List get(int idx) { + return partitionNodes(idx, false, cpys); + } + + @Override public int size() { + return partsCnt; + } + }; + + assert this.assignment.equals(assign.assignment()) : "new=" + this.assignment + ", old=" + assign.assignment(); + + assert this.idealAssignment.equals(assign.idealAssignment()) : + "new=" + this.idealAssignment + ", old=" + assign.idealAssignment(); + } + + /** + * @param p Partion. + * @param ideal {@code True} for ideal assignment. + * @param cpys Copies. + */ + private List partitionNodes(int p, boolean ideal, int cpys) { + char[] order; + + if (!ideal && (order = assignmentDiff.get(p)) != null) { + List ret = new ArrayList<>(order.length); + + for (int i = 0; i < order.length; i++) + ret.add(nodes[order[i] - 1]); + + return ret; + } + + List ret = new ArrayList<>(cpys); + + for (int i = 0; i < cpys; i++) { + char ord = idealParts[p * cpys + i]; + + if (ord == 0) // Zero + break; + + ret.add(nodes[ord - 1]); + } + + return ret; + } + + /** {@inheritDoc} */ + @Override public boolean clientEventChange() { + return false; + } + + /** {@inheritDoc} */ + @SuppressWarnings("unchecked") + @Override public List> idealAssignment() { + return idealAssignment; + } + + /** {@inheritDoc} */ + @Override public List> assignment() { + return assignment; + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; + } + + /** {@inheritDoc} */ + @Override public List get(int part) { + assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + + " 
[part=" + part + ", partitions=" + assignment.size() + ']'; + + return assignment.get(part); + } + + /** {@inheritDoc} */ + @Override public Collection getIds(int part) { + assert part >= 0 && part < assignment.size() : "Affinity partition is out of range" + + " [part=" + part + ", partitions=" + assignment.size() + ']'; + + if (IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION) + return assignments2ids(assignment.get(part)); + else { + List nodes = assignment.get(part); + + return nodes.size() > AffinityAssignment.IGNITE_AFFINITY_BACKUPS_THRESHOLD + ? assignments2ids(nodes) + : F.viewReadOnly(nodes, F.node2id()); + } + } + + /** {@inheritDoc} */ + @Override public Set nodes() { + Set res = new HashSet<>(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + if (!F.isEmpty(nodes)) + res.addAll(nodes); + } + + return Collections.unmodifiableSet(res); + } + + /** {@inheritDoc} */ + @Override public Set primaryPartitionNodes() { + Set res = new HashSet<>(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + if (!F.isEmpty(nodes)) + res.add(nodes.get(0)); + } + + return Collections.unmodifiableSet(res); + } + + /** {@inheritDoc} */ + @Override public Set primaryPartitions(UUID nodeId) { + Set res = IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION ? new HashSet<>() : new BitSetIntSet(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + if (!F.isEmpty(nodes) && nodes.get(0).id().equals(nodeId)) + res.add(p); + } + + return Collections.unmodifiableSet(res); + } + + /** {@inheritDoc} */ + @Override public Set backupPartitions(UUID nodeId) { + Set res = IGNITE_DISABLE_AFFINITY_MEMORY_OPTIMIZATION ? 
new HashSet<>() : new BitSetIntSet(); + + for (int p = 0; p < assignment.size(); p++) { + List nodes = assignment.get(p); + + for (int i = 1; i < nodes.size(); i++) { + ClusterNode node = nodes.get(i); + + if (node.id().equals(nodeId)) { + res.add(p); + + break; + } + } + } + + return Collections.unmodifiableSet(res); + } + + /** {@inheritDoc} */ + @Override public boolean requiresHistoryCleanup() { + return true; + } + + /** {@inheritDoc} */ + @Override public HistoryAffinityAssignment origin() { + return this; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return topVer.hashCode(); + } + + /** {@inheritDoc} */ + @SuppressWarnings("SimplifiableIfStatement") + @Override public boolean equals(Object o) { + if (o == this) + return true; + + if (o == null || !(o instanceof AffinityAssignment)) + return false; + + return topVer.equals(((AffinityAssignment)o).topologyVersion()); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(HistoryAffinityAssignmentImpl.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentShallowCopy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentShallowCopy.java new file mode 100644 index 0000000000000..a6805fdb44354 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/affinity/HistoryAffinityAssignmentShallowCopy.java @@ -0,0 +1,112 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ +package org.apache.ignite.internal.processors.affinity; + +import java.util.Collection; +import java.util.List; +import java.util.Set; +import java.util.UUID; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Shallow copy that contains reference to delegate {@link HistoryAffinityAssignment}. + */ +public class HistoryAffinityAssignmentShallowCopy implements HistoryAffinityAssignment { + /** History assignment. */ + private final HistoryAffinityAssignment histAssignment; + + /** Topology version. */ + private final AffinityTopologyVersion topVer; + + /** + * @param histAssignment History assignment. + * @param topVer Topology version. 
+ */ + public HistoryAffinityAssignmentShallowCopy( + HistoryAffinityAssignment histAssignment, + AffinityTopologyVersion topVer + ) { + this.histAssignment = histAssignment; + this.topVer = topVer; + } + + /** {@inheritDoc} */ + @Override public boolean requiresHistoryCleanup() { + return false; + } + + /** {@inheritDoc} */ + @Override public boolean clientEventChange() { + return true; + } + + /** {@inheritDoc} */ + @Override public List> idealAssignment() { + return histAssignment.idealAssignment(); + } + + /** {@inheritDoc} */ + @Override public List> assignment() { + return histAssignment.assignment(); + } + + /** {@inheritDoc} */ + @Override public AffinityTopologyVersion topologyVersion() { + return topVer; + } + + /** {@inheritDoc} */ + @Override public List get(int part) { + return histAssignment.get(part); + } + + /** {@inheritDoc} */ + @Override public Collection getIds(int part) { + return histAssignment.getIds(part); + } + + /** {@inheritDoc} */ + @Override public Set nodes() { + return histAssignment.nodes(); + } + + /** {@inheritDoc} */ + @Override public Set primaryPartitionNodes() { + return histAssignment.primaryPartitionNodes(); + } + + /** {@inheritDoc} */ + @Override public Set primaryPartitions(UUID nodeId) { + return histAssignment.primaryPartitions(nodeId); + } + + /** {@inheritDoc} */ + @Override public Set backupPartitions(UUID nodeId) { + return histAssignment.backupPartitions(nodeId); + } + + /** {@inheritDoc} */ + @Override public HistoryAffinityAssignment origin() { + return histAssignment; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(HistoryAffinityAssignmentShallowCopy.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/IgniteAuthenticationProcessor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/IgniteAuthenticationProcessor.java index c025e8c0b5f14..9076c2e222e7b 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/IgniteAuthenticationProcessor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/IgniteAuthenticationProcessor.java @@ -34,7 +34,6 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.cluster.ClusterNode; -import org.apache.ignite.events.DiscoveryEvent; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.GridTopic; import org.apache.ignite.internal.IgniteInternalFuture; @@ -44,7 +43,6 @@ import org.apache.ignite.internal.managers.communication.GridIoPolicy; import org.apache.ignite.internal.managers.communication.GridMessageListener; import org.apache.ignite.internal.managers.discovery.CustomEventListener; -import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; import org.apache.ignite.internal.managers.eventstorage.DiscoveryEventListener; import org.apache.ignite.internal.processors.GridProcessorAdapter; @@ -68,6 +66,8 @@ import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.spi.IgniteNodeValidationResult; import org.apache.ignite.spi.discovery.DiscoveryDataBag; +import org.apache.ignite.spi.discovery.DiscoverySpi; +import org.apache.ignite.spi.discovery.tcp.TcpDiscoverySpi; import org.apache.ignite.thread.IgniteThreadPoolExecutor; import org.jetbrains.annotations.Nullable; @@ -102,7 +102,7 @@ public class IgniteAuthenticationProcessor extends GridProcessorAdapter implemen private final Object mux = new Object(); /** Active operations. Collects to send on joining node. */ - private Map activeOps = Collections.synchronizedMap(new LinkedHashMap<>()); + private final Map activeOps = Collections.synchronizedMap(new LinkedHashMap<>()); /** User map. 
*/ private ConcurrentMap users; @@ -139,7 +139,7 @@ public class IgniteAuthenticationProcessor extends GridProcessorAdapter implemen private DiscoveryEventListener discoLsnr; /** Node activate future. */ - private GridFutureAdapter activateFut = new GridFutureAdapter<>(); + private final GridFutureAdapter activateFut = new GridFutureAdapter<>(); /** Validate error. */ private String validateErr; @@ -149,16 +149,14 @@ public class IgniteAuthenticationProcessor extends GridProcessorAdapter implemen */ public IgniteAuthenticationProcessor(GridKernalContext ctx) { super(ctx); - - isEnabled = ctx.config().isAuthenticationEnabled(); - - ctx.internalSubscriptionProcessor().registerMetastorageListener(this); } /** {@inheritDoc} */ @Override public void start() throws IgniteCheckedException { super.start(); + isEnabled = ctx.config().isAuthenticationEnabled(); + if (isEnabled && !GridCacheUtils.isPersistenceEnabled(ctx.config())) { isEnabled = false; @@ -166,6 +164,8 @@ public IgniteAuthenticationProcessor(GridKernalContext ctx) { + " Check the DataRegionConfiguration"); } + ctx.internalSubscriptionProcessor().registerMetastorageListener(this); + ctx.addNodeAttribute(IgniteNodeAttributes.ATTR_AUTHENTICATION_ENABLED, isEnabled); GridDiscoveryManager discoMgr = ctx.discovery(); @@ -176,38 +176,34 @@ public IgniteAuthenticationProcessor(GridKernalContext ctx) { discoMgr.setCustomEventListener(UserAcceptedMessage.class, new UserAcceptedListener()); - discoLsnr = new DiscoveryEventListener() { - @Override public void onEvent(DiscoveryEvent evt, DiscoCache discoCache) { - if (!isEnabled || ctx.isStopping()) - return; + discoLsnr = (evt, discoCache) -> { + if (!isEnabled || ctx.isStopping()) + return; - switch (evt.type()) { - case EVT_NODE_LEFT: - case EVT_NODE_FAILED: - onNodeLeft(evt.eventNode().id()); - break; + switch (evt.type()) { + case EVT_NODE_LEFT: + case EVT_NODE_FAILED: + onNodeLeft(evt.eventNode().id()); + break; - case EVT_NODE_JOINED: - onNodeJoin(evt.eventNode()); 
- break; - } + case EVT_NODE_JOINED: + onNodeJoin(evt.eventNode()); + break; } }; ctx.event().addDiscoveryEventListener(discoLsnr, DISCO_EVT_TYPES); - ioLsnr = new GridMessageListener() { - @Override public void onMessage(UUID nodeId, Object msg, byte plc) { - if (!isEnabled || ctx.isStopping()) - return; + ioLsnr = (nodeId, msg, plc) -> { + if (!isEnabled || ctx.isStopping()) + return; - if (msg instanceof UserManagementOperationFinishedMessage) - onFinishMessage(nodeId, (UserManagementOperationFinishedMessage)msg); - else if (msg instanceof UserAuthenticateRequestMessage) - onAuthenticateRequestMessage(nodeId, (UserAuthenticateRequestMessage)msg); - else if (msg instanceof UserAuthenticateResponseMessage) - onAuthenticateResponseMessage((UserAuthenticateResponseMessage)msg); - } + if (msg instanceof UserManagementOperationFinishedMessage) + onFinishMessage(nodeId, (UserManagementOperationFinishedMessage)msg); + else if (msg instanceof UserAuthenticateRequestMessage) + onAuthenticateRequestMessage(nodeId, (UserAuthenticateRequestMessage)msg); + else if (msg instanceof UserAuthenticateResponseMessage) + onAuthenticateResponseMessage((UserAuthenticateResponseMessage)msg); }; ioMgr.addMessageListener(GridTopic.TOPIC_AUTH, ioLsnr); @@ -407,11 +403,8 @@ public void updateUser(String login, String passwd) throws IgniteCheckedExceptio if (!ctx.clientNode()) { users = new ConcurrentHashMap<>(); - Map readUsers = (Map)metastorage.readForPredicate(new IgnitePredicate() { - @Override public boolean apply(String key) { - return key != null && key.startsWith(STORE_USER_PREFIX); - } - }); + Map readUsers = (Map)metastorage.readForPredicate( + (IgnitePredicate)key -> key != null && key.startsWith(STORE_USER_PREFIX)); for (User u : readUsers.values()) users.put(u.name(), u); @@ -451,7 +444,7 @@ public void updateUser(String login, String passwd) throws IgniteCheckedExceptio @Override public void collectGridNodeData(DiscoveryDataBag dataBag) { // 1. 
Collect users info only on coordinator // 2. Doesn't collect users info to send on client node due to security reason. - if (!isEnabled || !F.eq(ctx.localNodeId(), coordinator().id()) || dataBag.isJoiningNodeClient()) + if (!isEnabled || !isLocalNodeCoordinator() || dataBag.isJoiningNodeClient()) return; synchronized (mux) { @@ -466,6 +459,21 @@ public void updateUser(String login, String passwd) throws IgniteCheckedExceptio } } + /** + * Checks whether local node is coordinator. Nodes that are leaving or failed + * (but are still in topology) are removed from search. + * + * @return {@code true} if local node is coordinator. + */ + private boolean isLocalNodeCoordinator() { + DiscoverySpi spi = ctx.discovery().getInjectedDiscoverySpi(); + + if (spi instanceof TcpDiscoverySpi) + return ((TcpDiscoverySpi)spi).isLocalNodeCoordinator(); + else + return F.eq(ctx.localNodeId(), coordinator().id()); + } + /** {@inheritDoc} */ @Override public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { initUsrs = (InitialUsersData)data.commonData(); @@ -897,7 +905,10 @@ public void onLocalJoin() { // Can be empty on initial start of PDS cluster (default user will be created and stored after activate) if (!F.isEmpty(initUsrs.usrs)) { - users.clear(); + if (users == null) + users = new ConcurrentHashMap<>(); + else + users.clear(); for (User u : initUsrs.usrs) users.put(u.name(), u); @@ -1272,9 +1283,10 @@ private UserOperationWorker(UserManagementOperation op, UserOperationFinishFutur // Remove failed operation from active operations. activeOps.remove(op.id()); } - - if (sharedCtx != null) - sharedCtx.database().checkpointReadUnlock(); + finally { + if (sharedCtx != null) + sharedCtx.database().checkpointReadUnlock(); + } curOpFinishMsg = msg0; @@ -1297,6 +1309,7 @@ private UserOperationWorker(UserManagementOperation op, UserOperationFinishFutur * Initial users set worker. 
*/ private class RefreshUsersStorageWorker extends GridWorker { + /** */ private final ArrayList newUsrs; /** @@ -1322,11 +1335,8 @@ private RefreshUsersStorageWorker(ArrayList usrs) { sharedCtx.database().checkpointReadLock(); try { - Map existUsrs = (Map)metastorage.readForPredicate(new IgnitePredicate() { - @Override public boolean apply(String key) { - return key != null && key.startsWith(STORE_USER_PREFIX); - } - }); + Map existUsrs = (Map)metastorage.readForPredicate( + (IgnitePredicate)key -> key != null && key.startsWith(STORE_USER_PREFIX)); for (String key : existUsrs.keySet()) metastorage.remove(key); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAcceptedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAcceptedMessage.java index ef87a444cb374..2e2aed9dcf0b1 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAcceptedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAcceptedMessage.java @@ -71,6 +71,11 @@ public class UserAcceptedMessage implements DiscoveryCustomMessage { return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAuthenticateResponseMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAuthenticateResponseMessage.java index d86b1ad91a551..e3dee3ce92a4f 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAuthenticateResponseMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserAuthenticateResponseMessage.java @@ 
-98,7 +98,6 @@ public IgniteUuid id() { writer.incrementState(); - } return true; @@ -127,6 +126,7 @@ public IgniteUuid id() { return false; reader.incrementState(); + } return reader.afterMessageRead(UserAuthenticateResponseMessage.class); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserProposedMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserProposedMessage.java index 1a0be8ecaa2c6..19f9e82cf18cb 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserProposedMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/authentication/UserProposedMessage.java @@ -71,6 +71,11 @@ public class UserProposedMessage implements DiscoveryServerOnlyCustomMessage { return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java index 90714c8308de4..0f7444ceceba0 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCacheWriter.java @@ -20,7 +20,9 @@ import org.apache.ignite.lang.IgniteBiTuple; import org.apache.ignite.lang.IgniteInClosure; -/** A proxy, which stores given key+value pair to a cache. */ +/** + * A proxy, which stores given key+value pair to a cache. + */ public abstract class BulkLoadCacheWriter implements IgniteInClosure>, AutoCloseable { /** * Returns number of entry updates made by the writer. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java index ec1dfd1a889b5..69cb341daacf3 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvFormat.java @@ -22,7 +22,9 @@ import java.util.regex.Pattern; -/** A placeholder for bulk load CSV format parser options. */ +/** + * A placeholder for bulk load CSV format parser options. + */ public class BulkLoadCsvFormat extends BulkLoadFormat { /** Line separator pattern. */ @NotNull public static final Pattern DEFAULT_LINE_SEPARATOR = Pattern.compile("[\r\n]+"); @@ -57,6 +59,9 @@ public class BulkLoadCsvFormat extends BulkLoadFormat { /** Set of escape start characters. */ @Nullable private String escapeChars; + /** File charset. */ + @Nullable private String inputCharsetName; + /** * Returns the name of the format. * @@ -155,4 +160,22 @@ public void commentChars(@Nullable Pattern commentChars) { public void escapeChars(@Nullable String escapeChars) { this.escapeChars = escapeChars; } + + /** + * Returns the input file charset name, null if not specified. + * + * @return The input file charset name, null if not specified. + */ + @Nullable public String inputCharsetName() { + return inputCharsetName; + } + + /** + * Sets the input file charset name. The null here means "not specified". + * + * @param inputCharsetName The input file charset name. 
+ */ + public void inputCharsetName(@Nullable String inputCharsetName) { + this.inputCharsetName = inputCharsetName; + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java index 0511596012477..98e994d1fcde4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadCsvParser.java @@ -24,8 +24,12 @@ import org.apache.ignite.internal.processors.bulkload.pipeline.StrListAppenderBlock; import org.apache.ignite.internal.processors.bulkload.pipeline.LineSplitterBlock; +import java.nio.charset.Charset; +import java.nio.charset.IllegalCharsetNameException; +import java.nio.charset.UnsupportedCharsetException; import java.util.LinkedList; import java.util.List; +import org.apache.ignite.internal.processors.query.IgniteSQLException; /** CSV parser for COPY command. */ public class BulkLoadCsvParser extends BulkLoadParser { @@ -41,7 +45,20 @@ public class BulkLoadCsvParser extends BulkLoadParser { * @param format Format options (parsed from COPY command). */ public BulkLoadCsvParser(BulkLoadCsvFormat format) { - inputBlock = new CharsetDecoderBlock(BulkLoadFormat.DEFAULT_INPUT_CHARSET); + try { + Charset charset = format.inputCharsetName() == null ? 
BulkLoadFormat.DEFAULT_INPUT_CHARSET : + Charset.forName(format.inputCharsetName()); + + inputBlock = new CharsetDecoderBlock(charset); + } + catch (IllegalCharsetNameException e) { + throw new IgniteSQLException("Unknown charset name: '" + format.inputCharsetName() + "': " + + e.getMessage()); + } + catch (UnsupportedCharsetException e) { + throw new IgniteSQLException("Charset is not supported: '" + format.inputCharsetName() + "': " + + e.getMessage()); + } collectorBlock = new StrListAppenderBlock(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java index cff93c5788552..bb4beb07eef52 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/BulkLoadFormat.java @@ -19,7 +19,9 @@ import java.nio.charset.Charset; -/** A superclass and a factory for bulk load format options. */ +/** + * A superclass and a factory for bulk load format options. + */ public abstract class BulkLoadFormat { /** The default input charset. */ public static final Charset DEFAULT_INPUT_CHARSET = Charset.forName("UTF-8"); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java index 5b18def1a37cc..f9031b161745c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/CharsetDecoderBlock.java @@ -34,6 +34,9 @@ * the erroneous input, appending the coder's replacement value to the output buffer, and resuming the coding operation. 
*/ public class CharsetDecoderBlock extends PipelineBlock { + /** Empty portion. */ + public static final char[] EMPTY_PORTION = new char[0]; + /** Charset decoder */ private final CharsetDecoder charsetDecoder; @@ -67,7 +70,8 @@ public CharsetDecoderBlock(Charset charset) { isEndOfInput = isLastAppend; if (leftover == null && data.length == 0) { - nextBlock.accept(new char[0], isLastAppend); + nextBlock.accept(EMPTY_PORTION, isLastAppend); + return; } @@ -78,8 +82,7 @@ public CharsetDecoderBlock(Charset charset) { else { dataBuf = ByteBuffer.allocate(leftover.length + data.length); - dataBuf.put(leftover) - .put(data); + dataBuf.put(leftover).put(data); dataBuf.flip(); @@ -101,8 +104,9 @@ public CharsetDecoderBlock(Charset charset) { leftover = Arrays.copyOfRange(dataBuf.array(), dataBuf.arrayOffset() + dataBuf.position(), dataBuf.limit()); + // See {@link CharsetDecoder} class javadoc for the protocol. if (isEndOfInput) - charsetDecoder.flush(outBuf); // See {@link CharsetDecoder} class javadoc for the protocol. + charsetDecoder.flush(outBuf); if (outBuf.position() > 0) nextBlock.accept(Arrays.copyOfRange(outBuf.array(), outBuf.arrayOffset(), outBuf.position()), @@ -111,7 +115,8 @@ public CharsetDecoderBlock(Charset charset) { break; } - if (res.isOverflow()) { // Not enough space in the output buffer, flush it and retry. + // Not enough space in the output buffer, flush it and retry. 
+ if (res.isOverflow()) { assert outBuf.position() > 0; nextBlock.accept(Arrays.copyOfRange(outBuf.array(), outBuf.arrayOffset(), outBuf.position()), diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java index 914b4b4d4aaa4..f0a7339d6425d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/bulkload/pipeline/PipelineBlock.java @@ -27,7 +27,7 @@ */ public abstract class PipelineBlock { /** The next block in pipeline or null if this block is a terminator. */ - @Nullable protected PipelineBlock nextBlock; + @Nullable PipelineBlock nextBlock; /** * Creates a pipeline block. @@ -35,7 +35,7 @@ public abstract class PipelineBlock { *

(There is no nextBlock argument in the constructor: setting the next block using * {@link #append(PipelineBlock)} method is more convenient. */ - protected PipelineBlock() { + PipelineBlock() { nextBlock = null; } @@ -61,6 +61,7 @@ public PipelineBlock append(PipelineBlock next) { * * @param inputPortion Portion of input. * @param isLastPortion Is this the last portion. + * @throws IgniteCheckedException On error. */ public abstract void accept(I inputPortion, boolean isLastPortion) throws IgniteCheckedException; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java index fe1014cf6e010..937a8892bdfae 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinityChangeMessage.java @@ -155,6 +155,11 @@ public AffinityTopologyVersion topologyVersion() { return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java index 6691b132f876f..9594c40ce8c3b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheAffinitySharedManager.java @@ -23,14 +23,17 @@ import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; 
import java.util.Set; import java.util.UUID; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; import javax.cache.CacheException; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteSystemProperties; import org.apache.ignite.cache.affinity.AffinityFunction; import org.apache.ignite.cluster.ClusterNode; @@ -48,16 +51,16 @@ import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; import org.apache.ignite.internal.processors.cache.distributed.dht.ClientCacheDhtTopologyFuture; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridClientPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAssignmentFetchFuture; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.CacheGroupAffinityMessage; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsFullMessage; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridClientPartitionTopology; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import 
org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; import org.apache.ignite.internal.util.GridLongList; import org.apache.ignite.internal.util.GridPartitionStateMap; @@ -111,7 +114,7 @@ public class CacheAffinitySharedManager extends GridCacheSharedManagerAdap private AffinityTopologyVersion lastAffVer; /** Registered caches (updated from exchange thread). */ - private final CachesInfo caches = new CachesInfo(); + private CachesRegistry cachesRegistry; /** */ private WaitRebalanceInfo waitInfo; @@ -145,6 +148,8 @@ public class CacheAffinitySharedManager extends GridCacheSharedManagerAdap super.start0(); cctx.kernalContext().event().addLocalEventListener(discoLsnr, EVT_NODE_LEFT, EVT_NODE_FAILED); + + cachesRegistry = new CachesRegistry(cctx); } /** @@ -168,7 +173,7 @@ void onDiscoveryEvent(int type, !DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(customMsg)) return; - if ((!CU.clientNode(node) && (type == EVT_NODE_FAILED || type == EVT_NODE_JOINED || type == EVT_NODE_LEFT)) || + if ((!node.isClient() && (type == EVT_NODE_FAILED || type == EVT_NODE_JOINED || type == EVT_NODE_LEFT)) || DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(customMsg)) { synchronized (mux) { assert lastAffVer == null || topVer.compareTo(lastAffVer) > 0 : @@ -182,14 +187,11 @@ void onDiscoveryEvent(int type, /** * Must be called from exchange thread. */ - public void initCachesOnLocalJoin( - Map cacheGroupDescriptors, - Map cacheDescriptors + public IgniteInternalFuture initCachesOnLocalJoin( + Map grpDescs, + Map cacheDescs ) { - // Clean-up in case of client reconnect. 
- caches.clear(); - - caches.init(cacheGroupDescriptors, cacheDescriptors); + return cachesRegistry.init(grpDescs, cacheDescs); } /** @@ -266,7 +268,7 @@ void checkRebalanceState(GridDhtPartitionTopology top, Integer checkGrpId) { CacheAffinityChangeMessage msg = null; synchronized (mux) { - if (waitInfo == null || !waitInfo.topVer.equals(lastAffVer) ) + if (waitInfo == null || !waitInfo.topVer.equals(lastAffVer)) return; Map partWait = waitInfo.waitGrps.get(checkGrpId); @@ -321,13 +323,31 @@ void checkRebalanceState(GridDhtPartitionTopology top, Integer checkGrpId) { */ public Set waitGroups() { synchronized (mux) { - if (waitInfo == null || !waitInfo.topVer.equals(lastAffVer) ) + if (waitInfo == null || !waitInfo.topVer.equals(lastAffVer)) return Collections.emptySet(); return new HashSet<>(waitInfo.waitGrps.keySet()); } } + /** + * Adds historically rebalancing partitions to wait group. + * Not doing so could trigger late affinity switching before actual rebalancing will finish. + * + * @param grpId Group id. + * @param part Part. + * @param node Node. + * @param topVer Topology version. + */ + public void addToWaitGroup(int grpId, int part, UUID node, AffinityTopologyVersion topVer) { + synchronized (mux) { + if (waitInfo == null) + waitInfo = new WaitRebalanceInfo(topVer); + + waitInfo.add(grpId, part, node, null); + } + } + /** * @param waitInfo Cache rebalance information. * @return Message. @@ -358,14 +378,7 @@ public Set waitGroups() { * @param grp Cache group. */ void onCacheGroupCreated(CacheGroupContext grp) { - if (!grpHolders.containsKey(grp.groupId())) { - cctx.io().addCacheGroupHandler(grp.groupId(), GridDhtAffinityAssignmentResponse.class, - new IgniteBiInClosure() { - @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentResponse res) { - processAffinityAssignmentResponse(nodeId, res); - } - }); - } + // no-op } /** @@ -373,12 +386,14 @@ void onCacheGroupCreated(CacheGroupContext grp) { * @param startReqs Client cache start request. 
* @return Descriptors for caches to start. */ - @Nullable private List clientCachesToStart(UUID reqId, - Map startReqs) { + @Nullable private List clientCachesToStart( + UUID reqId, + Map startReqs + ) { List startDescs = new ArrayList<>(startReqs.size()); for (DynamicCacheChangeRequest startReq : startReqs.values()) { - DynamicCacheDescriptor desc = caches.cache(CU.cacheId(startReq.cacheName())); + DynamicCacheDescriptor desc = cachesRegistry.cache(CU.cacheId(startReq.cacheName())); if (desc == null) { CacheException err = new CacheException("Failed to start client cache " + @@ -399,23 +414,21 @@ void onCacheGroupCreated(CacheGroupContext grp) { } /** - * @param msg Change request. * @param crd Coordinator flag. + * @param msg Change request. * @param topVer Current topology version. * @param discoCache Discovery data cache. * @return Map of started caches (cache ID to near enabled flag). */ @Nullable private Map processClientCacheStartRequests( - ClientCacheChangeDummyDiscoveryMessage msg, boolean crd, + ClientCacheChangeDummyDiscoveryMessage msg, AffinityTopologyVersion topVer, - DiscoCache discoCache) { + DiscoCache discoCache + ) { Map startReqs = msg.startRequests(); - if (startReqs == null) - return null; - - List startDescs = clientCachesToStart(msg.requestId(), msg.startRequests()); + List startDescs = clientCachesToStart(msg.requestId(), startReqs); if (startDescs == null || startDescs.isEmpty()) { cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null); @@ -425,60 +438,90 @@ void onCacheGroupCreated(CacheGroupContext grp) { Map fetchFuts = U.newHashMap(startDescs.size()); - Set startedCaches = U.newHashSet(startDescs.size()); - Map startedInfos = U.newHashMap(startDescs.size()); - for (DynamicCacheDescriptor desc : startDescs) { - try { - startedCaches.add(desc.cacheName()); + List startCacheInfos = startDescs.stream() + .map(desc -> { + DynamicCacheChangeRequest changeReq = startReqs.get(desc.cacheName()); - DynamicCacheChangeRequest 
startReq = startReqs.get(desc.cacheName()); + startedInfos.put(desc.cacheId(), changeReq.nearCacheConfiguration() != null); - cctx.cache().prepareCacheStart(desc.cacheConfiguration(), + return new StartCacheInfo( desc, - startReq.nearCacheConfiguration(), + changeReq.nearCacheConfiguration(), topVer, - startReq.disabledAfterStart()); + changeReq.disabledAfterStart() + ); + }).collect(Collectors.toList()); - startedInfos.put(desc.cacheId(), startReq.nearCacheConfiguration() != null); + Set startedCaches = startCacheInfos.stream() + .map(info -> info.getCacheDescriptor().cacheName()) + .collect(Collectors.toSet()); - CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId()); + try { + cctx.cache().prepareStartCaches(startCacheInfos); + } + catch (IgniteCheckedException e) { + cctx.cache().closeCaches(startedCaches, false); + + cctx.cache().completeClientCacheChangeFuture(msg.requestId(), e); + + return null; + } + + Set groupDescs = startDescs.stream() + .map(DynamicCacheDescriptor::groupDescriptor) + .collect(Collectors.toSet()); + + for (CacheGroupDescriptor grpDesc : groupDescs) { + try { + CacheGroupContext grp = cctx.cache().cacheGroup(grpDesc.groupId()); - assert grp != null : desc.groupId(); + assert grp != null : grpDesc.groupId(); assert !grp.affinityNode() || grp.isLocal() : grp.cacheOrGroupName(); - if (!grp.isLocal() && grp.affinity().lastVersion().equals(AffinityTopologyVersion.NONE)) { - assert grp.localStartVersion().equals(topVer) : grp.localStartVersion(); + // Skip for local caches. 
+ if (grp.isLocal()) + continue; - if (crd) { - CacheGroupHolder grpHolder = grpHolders.get(grp.groupId()); + CacheGroupHolder grpHolder = grpHolders.get(grp.groupId()); - assert grpHolder != null && grpHolder.affinity().idealAssignment() != null; + assert !crd || (grpHolder != null && grpHolder.affinity().idealAssignment() != null); - if (grpHolder.client()) { - ClientCacheDhtTopologyFuture topFut = new ClientCacheDhtTopologyFuture(topVer); + if (grpHolder == null) + grpHolder = getOrCreateGroupHolder(topVer, grpDesc); - grp.topology().updateTopologyVersion(topFut, discoCache, -1, false); + // If current node is not client and current node have no aff holder. + if (grpHolder.nonAffNode() && !cctx.localNode().isClient()) { + ClientCacheDhtTopologyFuture topFut = new ClientCacheDhtTopologyFuture(topVer); - grpHolder = new CacheGroupHolder1(grp, grpHolder.affinity()); + grp.topology().updateTopologyVersion(topFut, discoCache, -1, false); - grpHolders.put(grp.groupId(), grpHolder); + grpHolder = new CacheGroupAffNodeHolder(grp, grpHolder.affinity()); - GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId()); + grpHolders.put(grp.groupId(), grpHolder); - if (clientTop != null) { - grp.topology().update(grpHolder.affinity().lastVersion(), - clientTop.partitionMap(true), - clientTop.fullUpdateCounters(), - Collections.emptySet(), - null); - } + GridClientPartitionTopology clientTop = cctx.exchange().clearClientTopology(grp.groupId()); - assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion()); - } + if (clientTop != null) { + grp.topology().update( + grpHolder.affinity().lastVersion(), + clientTop.partitionMap(true), + clientTop.fullUpdateCounters(), + Collections.emptySet(), + null, + null, + null + ); } - else if (!fetchFuts.containsKey(grp.groupId())) { + + assert grpHolder.affinity().lastVersion().equals(grp.affinity().lastVersion()); + } + else if (!crd && !fetchFuts.containsKey(grp.groupId())) { + boolean 
topVerLessOrNotInitialized = !grp.topology().initialized() || + grp.topology().readyTopologyVersion().compareTo(topVer) < 0; + + if (grp.affinity().lastVersion().compareTo(topVer) < 0 || topVerLessOrNotInitialized) { GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, grp.groupId(), topVer, @@ -530,7 +573,7 @@ else if (!fetchFuts.containsKey(grp.groupId())) { grp.topology().updateTopologyVersion(topFut, discoCache, -1, false); - grp.topology().update(topVer, partMap, null, Collections.emptySet(), null); + grp.topology().update(topVer, partMap, null, Collections.emptySet(), null, null, null); topFut.validate(grp, discoCache.allNodes()); } @@ -555,6 +598,8 @@ else if (!fetchFuts.containsKey(grp.groupId())) { cctx.cache().initCacheProxies(topVer, null); + startReqs.keySet().forEach(req -> cctx.cache().completeProxyInitialize(req)); + cctx.cache().completeClientCacheChangeFuture(msg.requestId(), null); return startedInfos; @@ -569,35 +614,33 @@ else if (!fetchFuts.containsKey(grp.groupId())) { private Set processCacheCloseRequests( ClientCacheChangeDummyDiscoveryMessage msg, boolean crd, - AffinityTopologyVersion topVer) { + AffinityTopologyVersion topVer + ) { Set cachesToClose = msg.cachesToClose(); - if (cachesToClose == null) - return null; - Set closed = cctx.cache().closeCaches(cachesToClose, true); - if (crd) { - for (CacheGroupHolder hld : grpHolders.values()) { - if (!hld.client() && cctx.cache().cacheGroup(hld.groupId()) == null) { - int grpId = hld.groupId(); + for (CacheGroupHolder hld : grpHolders.values()) { + if (!hld.nonAffNode() && cctx.cache().cacheGroup(hld.groupId()) == null) { + int grpId = hld.groupId(); - // All client cache groups were stopped, need create 'client' CacheGroupHolder. - CacheGroupHolder grpHolder = grpHolders.remove(grpId); + // All client cache groups were stopped, need create 'client' CacheGroupHolder. 
+ CacheGroupHolder grpHolder = grpHolders.remove(grpId); - assert grpHolder != null && !grpHolder.client() : grpHolder; + assert grpHolder != null && !grpHolder.nonAffNode() : grpHolder; - try { - grpHolder = CacheGroupHolder2.create(cctx, - caches.group(grpId), - topVer, - grpHolder.affinity()); + try { + grpHolder = createHolder( + cctx, + cachesRegistry.group(grpId), + topVer, + grpHolder.affinity() + ); - grpHolders.put(grpId, grpHolder); - } - catch (IgniteCheckedException e) { - U.error(log, "Failed to initialize cache: " + e, e); - } + grpHolders.put(grpId, grpHolder); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to initialize cache: " + e, e); } } } @@ -608,21 +651,33 @@ private Set processCacheCloseRequests( } /** - * Process client cache start/close requests, called from exchange thread. + * Process non affinity node cache start/close requests, called from exchange thread. * * @param msg Change request. */ - void processClientCachesChanges(ClientCacheChangeDummyDiscoveryMessage msg) { + void processClientCachesRequests(ClientCacheChangeDummyDiscoveryMessage msg) { + // Get ready exchange version. AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion(); DiscoCache discoCache = cctx.discovery().discoCache(topVer); - boolean crd = cctx.localNode().equals(discoCache.oldestAliveServerNode()); + ClusterNode node = discoCache.oldestAliveServerNode(); - Map startedCaches = processClientCacheStartRequests(msg, crd, topVer, discoCache); + // Resolve coordinator for specific version. + boolean crd = node != null && node.isLocal(); - Set closedCaches = processCacheCloseRequests(msg, crd, topVer); + Map startedCaches = null; + Set closedCaches = null; + // Check and start caches via dummy message. + if (msg.startRequests() != null) + startedCaches = processClientCacheStartRequests(crd, msg, topVer, discoCache); + + // Check and close caches via dummy message. 
+ if (msg.cachesToClose() != null) + closedCaches = processCacheCloseRequests(msg, crd, topVer); + + // Shedule change message. if (startedCaches != null || closedCaches != null) scheduleClientChangeMessage(startedCaches, closedCaches); } @@ -642,7 +697,7 @@ void sendClientCacheChangesMessage(ClientCacheUpdateTimeout timeoutObj) { clientCacheChanges.remove(); - msg.checkCachesExist(caches.registeredCaches.keySet()); + msg.checkCachesExist(cachesRegistry.allCaches().keySet()); try { if (!msg.empty()) @@ -711,6 +766,8 @@ public void onCustomMessageNoAffinityChange( return; aff.clientEventTopologyChange(evts.lastEvent(), evts.topologyVersion()); + + cctx.exchange().exchangerUpdateHeartbeat(); } }); } @@ -719,14 +776,55 @@ public void onCustomMessageNoAffinityChange( * @param cctx Stopped cache context. */ public void stopCacheOnReconnect(GridCacheContext cctx) { - caches.registeredCaches.remove(cctx.cacheId()); + cachesRegistry.unregisterCache(cctx.cacheId()); } /** * @param grpCtx Stopped cache group context. */ public void stopCacheGroupOnReconnect(CacheGroupContext grpCtx) { - caches.registeredGrps.remove(grpCtx.groupId()); + cachesRegistry.unregisterGroup(grpCtx.groupId()); + } + + /** {@inheritDoc} */ + @Override public void onDisconnected(IgniteFuture reconnectFut) { + Iterator it = grpHolders.keySet().iterator(); + + while (it.hasNext()) { + int grpId = it.next(); + + it.remove(); + + cctx.io().removeHandler(true, grpId, GridDhtAffinityAssignmentResponse.class); + } + + assert grpHolders.isEmpty(); + + super.onDisconnected(reconnectFut); + } + + /** + * Called during the rollback of the exchange partitions procedure in order to stop the given cache even if it's not + * fully initialized (e.g. failed on cache init stage). + * + * @param fut Exchange future. + * @param crd Coordinator flag. + * @param exchActions Cache change requests. 
+ */ + public void forceCloseCaches( + GridDhtPartitionsExchangeFuture fut, + boolean crd, + final ExchangeActions exchActions + ) { + assert exchActions != null && !exchActions.empty() && exchActions.cacheStartRequests().isEmpty() : exchActions; + + IgniteInternalFuture res = cachesRegistry.update(exchActions); + + assert res.isDone() : "There should be no caches to start: " + exchActions; + + processCacheStopRequests(fut, crd, exchActions, true); + + cctx.cache().forceCloseCaches(exchActions); } /** @@ -737,20 +835,83 @@ public void stopCacheGroupOnReconnect(CacheGroupContext grpCtx) { * @param exchActions Cache change requests. * @throws IgniteCheckedException If failed. */ - public void onCacheChangeRequest( + public IgniteInternalFuture onCacheChangeRequest( GridDhtPartitionsExchangeFuture fut, boolean crd, final ExchangeActions exchActions ) throws IgniteCheckedException { assert exchActions != null && !exchActions.empty() : exchActions; - final ExchangeDiscoveryEvents evts = fut.context().events(); - - caches.updateCachesInfo(exchActions); + IgniteInternalFuture res = cachesRegistry.update(exchActions); // Affinity did not change for existing caches. 
onCustomMessageNoAffinityChange(fut, crd, exchActions); + fut.timeBag().finishGlobalStage("Update caches registry"); + + processCacheStartRequests(fut, crd, exchActions); + + Set stoppedGrps = processCacheStopRequests(fut, crd, exchActions, false); + + if (stoppedGrps != null) { + AffinityTopologyVersion notifyTopVer = null; + + synchronized (mux) { + if (waitInfo != null) { + for (Integer grpId : stoppedGrps) { + boolean rmv = waitInfo.waitGrps.remove(grpId) != null; + + if (rmv) { + notifyTopVer = waitInfo.topVer; + + waitInfo.assignments.remove(grpId); + } + } + } + } + + if (notifyTopVer != null) { + final AffinityTopologyVersion topVer = notifyTopVer; + + cctx.kernalContext().closure().runLocalSafe(new Runnable() { + @Override public void run() { + onCacheGroupStopped(topVer); + } + }); + } + } + + ClientCacheChangeDiscoveryMessage msg = clientCacheChanges.get(); + + if (msg != null) { + msg.checkCachesExist(cachesRegistry.allCaches().keySet()); + + if (msg.empty()) + clientCacheChanges.remove(); + } + + return res; + } + + /** + * Process cache start requests. + * + * @param fut Exchange future. + * @param crd Coordinator flag. + * @param exchActions Cache change requests. + * @throws IgniteCheckedException If failed. 
+ */ + private void processCacheStartRequests( + GridDhtPartitionsExchangeFuture fut, + boolean crd, + final ExchangeActions exchActions + ) throws IgniteCheckedException { + assert exchActions != null && !exchActions.empty() : exchActions; + + final ExchangeDiscoveryEvents evts = fut.context().events(); + + Map startCacheInfos = new LinkedHashMap<>(); + for (ExchangeActions.CacheActionData action : exchActions.cacheStartRequests()) { DynamicCacheDescriptor cacheDesc = action.descriptor(); @@ -770,7 +931,7 @@ public void onCacheChangeRequest( assert cctx.cacheContext(cacheDesc.cacheId()) == null : "Starting cache has not null context: " + cacheDesc.cacheName(); - IgniteCacheProxyImpl cacheProxy = (IgniteCacheProxyImpl) cctx.cache().jcacheProxy(req.cacheName()); + IgniteCacheProxyImpl cacheProxy = cctx.cache().jcacheProxy(req.cacheName(), false); // If it has proxy then try to start it if (cacheProxy != null) { @@ -786,49 +947,102 @@ public void onCacheChangeRequest( } } - try { - if (startCache) { - cctx.cache().prepareCacheStart(req.startCacheConfiguration(), + if (startCache) { + startCacheInfos.put( + new StartCacheInfo( + req.startCacheConfiguration(), cacheDesc, nearCfg, evts.topologyVersion(), - req.disabledAfterStart()); - - if (fut.cacheAddedOnExchange(cacheDesc.cacheId(), cacheDesc.receivedFrom())) { - if (fut.events().discoveryCache().cacheGroupAffinityNodes(cacheDesc.groupId()).isEmpty()) - U.quietAndWarn(log, "No server nodes found for cache client: " + req.cacheName()); - } - } + req.disabledAfterStart() + ), + req + ); } - catch (IgniteCheckedException e) { + } + + Map failedCaches = cctx.cache().prepareStartCachesIfPossible(startCacheInfos.keySet()); + + for (Map.Entry entry : failedCaches.entrySet()) { + if (cctx.localNode().isClient()) { U.error(log, "Failed to initialize cache. Will try to rollback cache start routine. 
" + - "[cacheName=" + req.cacheName() + ']', e); + "[cacheName=" + entry.getKey().getStartedConfiguration().getName() + ']', entry.getValue()); - cctx.cache().closeCaches(Collections.singleton(req.cacheName()), false); + cctx.cache().closeCaches(Collections.singleton(entry.getKey().getStartedConfiguration().getName()), false); - cctx.cache().completeCacheStartFuture(req, false, e); + cctx.cache().completeCacheStartFuture(startCacheInfos.get(entry.getKey()), false, entry.getValue()); } + else + throw entry.getValue(); } - Set gprs = new HashSet<>(); + Set failedCacheInfos = failedCaches.keySet(); - for (ExchangeActions.CacheActionData action : exchActions.cacheStartRequests()) { - int grpId = action.descriptor().groupId(); + List cacheInfos = startCacheInfos.keySet().stream() + .filter(failedCacheInfos::contains) + .collect(Collectors.toList()); - if (gprs.add(grpId)) { - if (crd) - initStartedGroupOnCoordinator(fut, action.descriptor().groupDescriptor()); - else { - CacheGroupContext grp = cctx.cache().cacheGroup(grpId); + for (StartCacheInfo info : cacheInfos) { + if (fut.cacheAddedOnExchange(info.getCacheDescriptor().cacheId(), info.getCacheDescriptor().receivedFrom())) { + if (fut.events().discoveryCache().cacheGroupAffinityNodes(info.getCacheDescriptor().groupId()).isEmpty()) + U.quietAndWarn(log, "No server nodes found for cache client: " + info.getCacheDescriptor().cacheName()); + } + } - if (grp != null && !grp.isLocal() && grp.localStartVersion().equals(fut.initialVersion())) { - assert grp.affinity().lastVersion().equals(AffinityTopologyVersion.NONE) : grp.affinity().lastVersion(); + fut.timeBag().finishGlobalStage("Start caches"); - initAffinity(caches.group(grp.groupId()), grp.affinity(), fut); - } - } + initAffinityOnCacheGroupsStart(fut, exchActions, crd); + + fut.timeBag().finishGlobalStage("Affinity initialization on cache group start"); + } + + /** + * Initializes affinity for started cache groups received during {@code fut}. 
+ * + * @param fut Exchange future. + * @param exchangeActions Exchange actions. + * @param crd {@code True} if local node is coordinator. + */ + private void initAffinityOnCacheGroupsStart( + GridDhtPartitionsExchangeFuture fut, + ExchangeActions exchangeActions, + boolean crd + ) throws IgniteCheckedException { + List startedGroups = exchangeActions.cacheStartRequests().stream() + .map(action -> action.descriptor().groupDescriptor()) + .distinct() + .collect(Collectors.toList()); + + U.doInParallel( + cctx.kernalContext().getSystemExecutorService(), + startedGroups, + grpDesc -> { + initStartedGroup(fut, grpDesc, crd); + + fut.timeBag().finishLocalStage("Affinity initialization on cache group start " + + "[grp=" + grpDesc.cacheOrGroupName() + "]"); + + return null; } - } + ); + } + + /** + * Process cache stop requests. + * + * @param fut Exchange future. + * @param crd Coordinator flag. + * @param exchActions Cache change requests. + * @param forceClose Force close flag. + * @return Set of cache groups to be stopped. 
+ */ + private Set processCacheStopRequests( + GridDhtPartitionsExchangeFuture fut, + boolean crd, + final ExchangeActions exchActions, + boolean forceClose + ) { + assert exchActions != null && !exchActions.empty() : exchActions; for (ExchangeActions.CacheActionData action : exchActions.cacheStopRequests()) cctx.cache().blockGateway(action.request().cacheName(), true, action.request().restart()); @@ -838,13 +1052,13 @@ public void onCacheChangeRequest( Set stoppedGrps = null; - if (crd) { - for (ExchangeActions.CacheGroupActionData data : exchActions.cacheGroupsToStop()) { - if (data.descriptor().config().getCacheMode() != LOCAL) { - CacheGroupHolder cacheGrp = grpHolders.remove(data.descriptor().groupId()); + for (ExchangeActions.CacheGroupActionData data : exchActions.cacheGroupsToStop()) { + if (data.descriptor().config().getCacheMode() != LOCAL) { + CacheGroupHolder cacheGrp = grpHolders.remove(data.descriptor().groupId()); - assert cacheGrp != null : data.descriptor(); + assert !crd || (cacheGrp != null || forceClose) : data.descriptor(); + if (cacheGrp != null) { if (stoppedGrps == null) stoppedGrps = new HashSet<>(); @@ -855,51 +1069,16 @@ public void onCacheChangeRequest( } } - if (stoppedGrps != null) { - AffinityTopologyVersion notifyTopVer = null; - - synchronized (mux) { - if (waitInfo != null) { - for (Integer grpId : stoppedGrps) { - boolean rmv = waitInfo.waitGrps.remove(grpId) != null; - - if (rmv) { - notifyTopVer = waitInfo.topVer; - - waitInfo.assignments.remove(grpId); - } - } - } - } - - if (notifyTopVer != null) { - final AffinityTopologyVersion topVer = notifyTopVer; - - cctx.kernalContext().closure().runLocalSafe(new Runnable() { - @Override public void run() { - onCacheGroupStopped(topVer); - } - }); - } - } - - ClientCacheChangeDiscoveryMessage msg = clientCacheChanges.get(); - - if (msg != null) { - msg.checkCachesExist(caches.registeredCaches.keySet()); - - if (msg.empty()) - clientCacheChanges.remove(); - } + return stoppedGrps; } /** 
* */ - public void removeAllCacheInfo() { + public void clearGroupHoldersAndRegistry() { grpHolders.clear(); - caches.clear(); + cachesRegistry.unregisterAll(); } /** @@ -909,9 +1088,11 @@ public void removeAllCacheInfo() { * @param crd Coordinator flag. * @param msg Affinity change message. */ - public void onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture exchFut, + public void onExchangeChangeAffinityMessage( + GridDhtPartitionsExchangeFuture exchFut, boolean crd, - CacheAffinityChangeMessage msg) { + CacheAffinityChangeMessage msg + ) { if (log.isDebugEnabled()) { log.debug("Process exchange affinity change message [exchVer=" + exchFut.initialVersion() + ", msg=" + msg + ']'); @@ -925,8 +1106,6 @@ public void onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture exch assert assignment != null; - final Map>> affCache = new HashMap<>(); - forAllCacheGroups(crd, new IgniteInClosureX() { @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { List> idealAssignment = aff.idealAssignment(); @@ -946,7 +1125,10 @@ public void onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture exch else newAssignment = idealAssignment; - aff.initialize(topVer, cachedAssignment(aff, newAssignment, affCache)); + aff.initialize(topVer, newAssignment); + + exchFut.timeBag().finishLocalStage("Affinity recalculate by change affinity message " + + "[grp=" + aff.cacheOrGroupName() + "]"); } }); } @@ -959,10 +1141,11 @@ public void onExchangeChangeAffinityMessage(GridDhtPartitionsExchangeFuture exch * @param msg Message. * @throws IgniteCheckedException If failed. 
*/ - public void onChangeAffinityMessage(final GridDhtPartitionsExchangeFuture exchFut, + public void onChangeAffinityMessage( + final GridDhtPartitionsExchangeFuture exchFut, boolean crd, - final CacheAffinityChangeMessage msg) - throws IgniteCheckedException { + final CacheAffinityChangeMessage msg + ) { assert msg.topologyVersion() != null && msg.exchangeId() == null : msg; final AffinityTopologyVersion topVer = exchFut.initialVersion(); @@ -978,15 +1161,13 @@ public void onChangeAffinityMessage(final GridDhtPartitionsExchangeFuture exchFu final Map deploymentIds = msg.cacheDeploymentIds(); - final Map>> affCache = new HashMap<>(); - forAllCacheGroups(crd, new IgniteInClosureX() { @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { AffinityTopologyVersion affTopVer = aff.lastVersion(); assert affTopVer.topologyVersion() > 0 : affTopVer; - CacheGroupDescriptor desc = caches.group(aff.groupId()); + CacheGroupDescriptor desc = cachesRegistry.group(aff.groupId()); assert desc != null : aff.cacheOrGroupName(); @@ -1024,10 +1205,15 @@ public void onChangeAffinityMessage(final GridDhtPartitionsExchangeFuture exchFu assignment.set(part, nodes); } - aff.initialize(topVer, cachedAssignment(aff, assignment, affCache)); + aff.initialize(topVer, assignment); } else aff.clientEventTopologyChange(exchFut.firstEvent(), topVer); + + cctx.exchange().exchangerUpdateHeartbeat(); + + exchFut.timeBag().finishLocalStage("Affinity change by custom message " + + "[grp=" + aff.cacheOrGroupName() + "]"); } }); } @@ -1048,6 +1234,8 @@ public void onClientEvent(final GridDhtPartitionsExchangeFuture fut, boolean crd AffinityTopologyVersion topVer = fut.initialVersion(); aff.clientEventTopologyChange(fut.firstEvent(), topVer); + + cctx.exchange().exchangerUpdateHeartbeat(); } }); } @@ -1090,14 +1278,21 @@ private void processAffinityAssignmentResponse(UUID nodeId, GridDhtAffinityAssig /** * @param c Cache closure. 
- * @throws IgniteCheckedException If failed */ - private void forAllRegisteredCacheGroups(IgniteInClosureX c) throws IgniteCheckedException { - for (CacheGroupDescriptor cacheDesc : caches.allGroups()) { - if (cacheDesc.config().getCacheMode() == LOCAL) - continue; + private void forAllRegisteredCacheGroups(IgniteInClosureX c) { + Collection affinityCaches = cachesRegistry.allGroups().values().stream() + .filter(desc -> desc.config().getCacheMode() != LOCAL) + .collect(Collectors.toList()); + + try { + U.doInParallel(cctx.kernalContext().getSystemExecutorService(), affinityCaches, t -> { + c.applyx(t); - c.applyx(cacheDesc); + return null; + }); + } + catch (IgniteCheckedException e) { + throw new IgniteException("Failed to execute affinity operation on cache groups", e); } } @@ -1106,17 +1301,32 @@ private void forAllRegisteredCacheGroups(IgniteInClosureX * @param c Closure. */ private void forAllCacheGroups(boolean crd, IgniteInClosureX c) { - if (crd) { - for (CacheGroupHolder grp : grpHolders.values()) - c.apply(grp.affinity()); - } - else { - for (CacheGroupContext grp : cctx.kernalContext().cache().cacheGroups()) { - if (grp.isLocal()) - continue; + Collection affinityCaches; - c.apply(grp.affinity()); - } + Collection affinityCaches1 = grpHolders.values().stream() + .map(CacheGroupHolder::affinity) + .collect(Collectors.toList()); + + Collection affinityCaches2 = cctx.kernalContext().cache().cacheGroups().stream() + .filter(grp -> !grp.isLocal()) + .filter(grp -> !grp.isRecoveryMode()) + .map(CacheGroupContext::affinity) + .collect(Collectors.toList()); + + if (!cctx.localNode().isClient()) + affinityCaches = affinityCaches1; + else + affinityCaches = affinityCaches2; + + try { + U.doInParallel(cctx.kernalContext().getSystemExecutorService(), affinityCaches, t -> { + c.applyx(t); + + return null; + }); + } + catch (IgniteCheckedException e) { + throw new IgniteException("Failed to execute affinity operation on cache groups", e); } } @@ -1125,7 +1335,7 @@ 
private void forAllCacheGroups(boolean crd, IgniteInClosureX initStartedCaches( boolean crd, final GridDhtPartitionsExchangeFuture fut, Collection descs ) throws IgniteCheckedException { - caches.initStartedCaches(descs); + IgniteInternalFuture res = cachesRegistry.addUnregistered(descs); if (fut.context().mergeExchanges()) - return; + return res; - if (crd) { - forAllRegisteredCacheGroups(new IgniteInClosureX() { - @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - CacheGroupHolder cache = groupHolder(fut.initialVersion(), desc); + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { + CacheGroupHolder cache = getOrCreateGroupHolder(fut.initialVersion(), desc); - if (cache.affinity().lastVersion().equals(AffinityTopologyVersion.NONE)) - calculateAndInit(fut.events(), cache.affinity(), fut.initialVersion()); - } - }); - } - else { - forAllCacheGroups(false, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { - if (aff.lastVersion().equals(AffinityTopologyVersion.NONE)) - initAffinity(caches.group(aff.groupId()), aff, fut); + if (cache.affinity().lastVersion().equals(AffinityTopologyVersion.NONE)) { + initAffinity(desc, cache.affinity(), fut); + + cctx.exchange().exchangerUpdateHeartbeat(); + + fut.timeBag().finishLocalStage("Affinity initialization (new cache) " + + "[grp=" + desc.cacheOrGroupName() + ", crd=" + crd + "]"); } - }); - } + } + }); + + return res; } /** @@ -1220,7 +1424,11 @@ private void initAffinity(CacheGroupDescriptor desc, fetchFut.init(false); - fetchAffinity(evts.topologyVersion(), evts, evts.discoveryCache(), aff, fetchFut); + fetchAffinity(evts.topologyVersion(), + evts, + evts.discoveryCache(), + aff, + fetchFut); } } @@ -1265,11 +1473,12 @@ public GridAffinityAssignmentCache affinity(Integer grpId) { * @param fut Current exchange future. 
* @param msg Finish exchange message. */ - public void applyAffinityFromFullMessage(final GridDhtPartitionsExchangeFuture fut, - final GridDhtPartitionsFullMessage msg) { - final Map nodesByOrder = new HashMap<>(); - - final Map>> affCache = new HashMap<>(); + public void applyAffinityFromFullMessage( + final GridDhtPartitionsExchangeFuture fut, + final GridDhtPartitionsFullMessage msg + ) { + // Please do not use following pattern of code (nodesByOrder, affCache). NEVER. + final Map nodesByOrder = new ConcurrentHashMap<>(); forAllCacheGroups(false, new IgniteInClosureX() { @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { @@ -1301,7 +1510,10 @@ public void applyAffinityFromFullMessage(final GridDhtPartitionsExchangeFuture f else newAssignment = idealAssignment; - aff.initialize(evts.topologyVersion(), cachedAssignment(aff, newAssignment, affCache)); + aff.initialize(evts.topologyVersion(), newAssignment); + + fut.timeBag().finishLocalStage("Affinity applying from full message " + + "[grp=" + aff.cacheOrGroupName() + "]"); } }); } @@ -1310,33 +1522,38 @@ public void applyAffinityFromFullMessage(final GridDhtPartitionsExchangeFuture f * @param fut Current exchange future. * @param msg Message finish message. * @param resTopVer Result topology version. - * @throws IgniteCheckedException If failed. 
*/ - public void onLocalJoin(final GridDhtPartitionsExchangeFuture fut, + public void onLocalJoin( + final GridDhtPartitionsExchangeFuture fut, GridDhtPartitionsFullMessage msg, - final AffinityTopologyVersion resTopVer) - throws IgniteCheckedException { + final AffinityTopologyVersion resTopVer + ) { final Set affReq = fut.context().groupsAffinityRequestOnJoin(); - final Map nodesByOrder = new HashMap<>(); + final Map receivedAff = msg.joinedNodeAffinity(); - final Map joinedNodeAff = msg.joinedNodeAffinity(); + assert F.isEmpty(affReq) || (!F.isEmpty(receivedAff) && receivedAff.size() >= affReq.size()) + : ("Requested and received affinity are different " + + "[requestedCnt=" + (affReq != null ? affReq.size() : "none") + + ", receivedCnt=" + (receivedAff != null ? receivedAff.size() : "none") + + ", msg=" + msg + "]"); - assert !F.isEmpty(joinedNodeAff) : msg; - assert joinedNodeAff.size() >= affReq.size(); + final Map nodesByOrder = new ConcurrentHashMap<>(); - forAllCacheGroups(false, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { ExchangeDiscoveryEvents evts = fut.context().events(); - CacheGroupContext grp = cctx.cache().cacheGroup(aff.groupId()); + CacheGroupHolder holder = getOrCreateGroupHolder(fut.initialVersion(), desc); - assert grp != null; + GridAffinityAssignmentCache aff = holder.affinity(); + + CacheGroupContext grp = cctx.cache().cacheGroup(holder.groupId()); - if (affReq.contains(aff.groupId())) { - assert AffinityTopologyVersion.NONE.equals(aff.lastVersion()); + if (affReq != null && affReq.contains(aff.groupId())) { + assert resTopVer.compareTo(aff.lastVersion()) >= 0 : aff.lastVersion(); - CacheGroupAffinityMessage affMsg = joinedNodeAff.get(aff.groupId()); + CacheGroupAffinityMessage affMsg = receivedAff.get(aff.groupId()); 
assert affMsg != null; @@ -1358,10 +1575,14 @@ public void onLocalJoin(final GridDhtPartitionsExchangeFuture fut, aff.initialize(evts.topologyVersion(), assignments); } - else if (fut.cacheGroupAddedOnExchange(aff.groupId(), grp.receivedFrom())) + else if (grp != null && fut.cacheGroupAddedOnExchange(aff.groupId(), grp.receivedFrom())) calculateAndInit(evts, aff, evts.topologyVersion()); - grp.topology().initPartitionsWhenAffinityReady(resTopVer, fut); + if (grp != null) + grp.topology().initPartitionsWhenAffinityReady(resTopVer, fut); + + fut.timeBag().finishLocalStage("Affinity initialization (local join) " + + "[grp=" + aff.cacheOrGroupName() + "]"); } }); } @@ -1398,14 +1619,15 @@ public void onServerJoinWithExchangeMergeProtocol(GridDhtPartitionsExchangeFutur * @throws IgniteCheckedException If failed. */ public Map onServerLeftWithExchangeMergeProtocol( - final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException - { + final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { final ExchangeDiscoveryEvents evts = fut.context().events(); assert fut.context().mergeExchanges(); assert evts.hasServerLeft(); - return onReassignmentEnforced(fut); + Map result = onReassignmentEnforced(fut); + + return result; } /** @@ -1416,11 +1638,12 @@ public Map onServerLeftWithExchangeMergeProt * @throws IgniteCheckedException If failed. */ public Map onCustomEventWithEnforcedAffinityReassignment( - final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException - { + final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { assert DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(fut.firstEvent()); - return onReassignmentEnforced(fut); + Map result = onReassignmentEnforced(fut); + + return result; } /** @@ -1428,9 +1651,8 @@ public Map onCustomEventWithEnforcedAffinity * * @param fut Current exchange future. * @return Computed difference with ideal affinity. - * @throws IgniteCheckedException If failed. 
*/ - private Map onReassignmentEnforced( + public Map onReassignmentEnforced( final GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { final ExchangeDiscoveryEvents evts = fut.context().events(); @@ -1439,12 +1661,19 @@ private Map onReassignmentEnforced( @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { AffinityTopologyVersion topVer = evts.topologyVersion(); - CacheGroupHolder cache = groupHolder(topVer, desc); + CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc); - List> assign = cache.affinity().calculate(topVer, evts, evts.discoveryCache()); + // Already calculated. + if (grpHolder.affinity().lastVersion().equals(topVer)) + return; - if (!cache.rebalanceEnabled || fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom())) - cache.affinity().initialize(topVer, assign); + List> assign = grpHolder.affinity().calculate(topVer, evts, evts.discoveryCache()); + + if (!grpHolder.rebalanceEnabled || fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom())) + grpHolder.affinity().initialize(topVer, assign); + + fut.timeBag().finishLocalStage("Affinity initialization (enforced) " + + "[grp=" + desc.cacheOrGroupName() + "]"); } }); @@ -1472,19 +1701,28 @@ public void onServerJoin(final GridDhtPartitionsExchangeFuture fut, boolean crd) WaitRebalanceInfo waitRebalanceInfo = null; if (locJoin) { - if (crd) { - forAllRegisteredCacheGroups(new IgniteInClosureX() { - @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - AffinityTopologyVersion topVer = fut.initialVersion(); + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { + AffinityTopologyVersion topVer = fut.initialVersion(); - CacheGroupHolder grpHolder = groupHolder(topVer, desc); + CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc); + if (crd) { calculateAndInit(fut.events(), 
grpHolder.affinity(), topVer); + + cctx.exchange().exchangerUpdateHeartbeat(); + + fut.timeBag().finishLocalStage("First node affinity initialization (node join) " + + "[grp=" + desc.cacheOrGroupName() + "]"); } - }); - } - else + } + }); + + if (!crd) { fetchAffinityOnJoin(fut); + + fut.timeBag().finishLocalStage("Affinity fetch"); + } } else waitRebalanceInfo = initAffinityOnNodeJoin(fut, crd); @@ -1506,7 +1744,8 @@ public void onServerJoin(final GridDhtPartitionsExchangeFuture fut, boolean crd) * @param crd Coordinator flag. * @throws IgniteCheckedException If failed. */ - public void onBaselineTopologyChanged(final GridDhtPartitionsExchangeFuture fut, boolean crd) throws IgniteCheckedException { + public void onBaselineTopologyChanged(final GridDhtPartitionsExchangeFuture fut, + boolean crd) throws IgniteCheckedException { assert !fut.firstEvent().eventNode().isClient(); WaitRebalanceInfo waitRebalanceInfo = initAffinityOnNodeJoin(fut, crd); @@ -1531,7 +1770,7 @@ private String groupNames(Collection grpIds) { StringBuilder names = new StringBuilder(); for (Integer grpId : grpIds) { - String name = caches.group(grpId).cacheOrGroupName(); + String name = cachesRegistry.group(grpId).cacheOrGroupName(); if (names.length() != 0) names.append(", "); @@ -1547,7 +1786,7 @@ private String groupNames(Collection grpIds) { * @return Group name for debug purpose. 
*/ private String debugGroupName(int grpId) { - CacheGroupDescriptor desc = caches.group(grpId); + CacheGroupDescriptor desc = cachesRegistry.group(grpId); if (desc != null) return desc.cacheOrGroupName(); @@ -1576,40 +1815,40 @@ private void calculateAndInit(ExchangeDiscoveryEvents evts, private void fetchAffinityOnJoin(GridDhtPartitionsExchangeFuture fut) throws IgniteCheckedException { AffinityTopologyVersion topVer = fut.initialVersion(); - List fetchFuts = new ArrayList<>(); + List fetchFuts = Collections.synchronizedList(new ArrayList<>()); - for (CacheGroupContext grp : cctx.cache().cacheGroups()) { - if (grp.isLocal()) - continue; - - if (fut.cacheGroupAddedOnExchange(grp.groupId(), grp.receivedFrom())) { - // In case if merge is allowed do not calculate affinity since it can change on exchange end. - if (!fut.context().mergeExchanges()) - calculateAndInit(fut.events(), grp.affinity(), topVer); - } - else { - if (fut.context().fetchAffinityOnJoin()) { - CacheGroupDescriptor grpDesc = caches.group(grp.groupId()); - - assert grpDesc != null : grp.cacheOrGroupName(); - - GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, - grpDesc.groupId(), - topVer, - fut.events().discoveryCache()); + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { + if (cctx.kernalContext().clientNode() && cctx.cache().cacheGroup(desc.groupId()) == null) + return; // Skip non-started caches on client nodes. - fetchFut.init(false); + CacheGroupHolder holder = getOrCreateGroupHolder(topVer, desc); - fetchFuts.add(fetchFut); + if (fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom())) { + // In case if merge is allowed do not calculate affinity since it can change on exchange end. 
+ if (!fut.context().mergeExchanges()) + calculateAndInit(fut.events(), holder.affinity(), topVer); } else { - if (fut.events().discoveryCache().serverNodes().size() > 0) - fut.context().addGroupAffinityRequestOnJoin(grp.groupId()); - else - calculateAndInit(fut.events(), grp.affinity(), topVer); + if (fut.context().fetchAffinityOnJoin()) { + GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, + desc.groupId(), + topVer, + fut.events().discoveryCache()); + + fetchFut.init(false); + + fetchFuts.add(fetchFut); + } + else { + if (!fut.events().discoveryCache().serverNodes().isEmpty()) + fut.context().addGroupAffinityRequestOnJoin(desc.groupId()); + else + calculateAndInit(fut.events(), holder.affinity(), topVer); + } } - } - } + cctx.exchange().exchangerUpdateHeartbeat(); } + }); for (int i = 0; i < fetchFuts.size(); i++) { GridDhtAssignmentFetchFuture fetchFut = fetchFuts.get(i); @@ -1619,8 +1858,10 @@ private void fetchAffinityOnJoin(GridDhtPartitionsExchangeFuture fut) throws Ign fetchAffinity(topVer, fut.events(), fut.events().discoveryCache(), - cctx.cache().cacheGroup(grpId).affinity(), + groupAffinity(grpId), fetchFut); + + cctx.exchange().exchangerUpdateHeartbeat(); } } @@ -1633,12 +1874,13 @@ private void fetchAffinityOnJoin(GridDhtPartitionsExchangeFuture fut) throws Ign * @throws IgniteCheckedException If failed. * @return Affinity assignment response. 
*/ - private GridDhtAffinityAssignmentResponse fetchAffinity(AffinityTopologyVersion topVer, + private GridDhtAffinityAssignmentResponse fetchAffinity( + AffinityTopologyVersion topVer, @Nullable ExchangeDiscoveryEvents events, DiscoCache discoCache, GridAffinityAssignmentCache affCache, - GridDhtAssignmentFetchFuture fetchFut) - throws IgniteCheckedException { + GridDhtAssignmentFetchFuture fetchFut + ) throws IgniteCheckedException { assert affCache != null; GridDhtAffinityAssignmentResponse res = fetchFut.get(); @@ -1674,30 +1916,26 @@ private GridDhtAffinityAssignmentResponse fetchAffinity(AffinityTopologyVersion * * @param fut Exchange future. * @param crd Coordinator flag. - * @throws IgniteCheckedException If failed. * @return {@code True} if affinity should be assigned by coordinator. + * @throws IgniteCheckedException If failed. */ - public boolean onCentralizedAffinityChange(final GridDhtPartitionsExchangeFuture fut, boolean crd) throws IgniteCheckedException { + public boolean onCentralizedAffinityChange(final GridDhtPartitionsExchangeFuture fut, + boolean crd) throws IgniteCheckedException { assert (fut.events().hasServerLeft() && !fut.firstEvent().eventNode().isClient()) || DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(fut.firstEvent()) : fut.firstEvent(); - if (crd) { - // Need initialize CacheGroupHolders if this node become coordinator on this exchange. 
- forAllRegisteredCacheGroups(new IgniteInClosureX() { - @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - CacheGroupHolder cache = groupHolder(fut.initialVersion(), desc); + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { + CacheGroupHolder cache = getOrCreateGroupHolder(fut.initialVersion(), desc); - cache.aff.calculate(fut.initialVersion(), fut.events(), fut.events().discoveryCache()); - } - }); - } - else { - forAllCacheGroups(false, new IgniteInClosureX() { - @Override public void applyx(GridAffinityAssignmentCache aff) throws IgniteCheckedException { - aff.calculate(fut.initialVersion(), fut.events(), fut.events().discoveryCache()); - } - }); - } + cache.aff.calculate(fut.initialVersion(), fut.events(), fut.events().discoveryCache()); + + cctx.exchange().exchangerUpdateHeartbeat(); + + fut.timeBag().finishLocalStage("Affinity centralized initialization (crd) " + + "[grp=" + desc.cacheOrGroupName() + ", crd=" + crd + "]"); + } + }); synchronized (mux) { this.waitInfo = null; @@ -1709,20 +1947,27 @@ public boolean onCentralizedAffinityChange(final GridDhtPartitionsExchangeFuture /** * @param fut Exchange future. * @param newAff {@code True} if there are no older nodes with affinity info available. - * @throws IgniteCheckedException If failed. * @return Future completed when caches initialization is done. + * @throws IgniteCheckedException If failed. 
*/ - public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExchangeFuture fut, - final boolean newAff) throws IgniteCheckedException { - final List> futs = new ArrayList<>(); + public IgniteInternalFuture initCoordinatorCaches( + final GridDhtPartitionsExchangeFuture fut, + final boolean newAff + ) throws IgniteCheckedException { + boolean locJoin = fut.firstEvent().eventNode().isLocal(); + + if (!locJoin) + return null; + + final List> futs = Collections.synchronizedList(new ArrayList<>()); final AffinityTopologyVersion topVer = fut.initialVersion(); forAllRegisteredCacheGroups(new IgniteInClosureX() { @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - CacheGroupHolder grpHolder = grpHolders.get(desc.groupId()); + CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc); - if (grpHolder != null) + if (grpHolder.affinity().idealAssignment() != null) return; // Need initialize holders and affinity if this node became coordinator during this exchange. 
@@ -1731,15 +1976,7 @@ public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExch CacheGroupContext grp = cctx.cache().cacheGroup(grpId); if (grp == null) { - cctx.io().addCacheGroupHandler(desc.groupId(), GridDhtAffinityAssignmentResponse.class, - new IgniteBiInClosure() { - @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentResponse res) { - processAffinityAssignmentResponse(nodeId, res); - } - } - ); - - grpHolder = CacheGroupHolder2.create(cctx, desc, topVer, null); + grpHolder = createHolder(cctx, desc, topVer, null); final GridAffinityAssignmentCache aff = grpHolder.affinity(); @@ -1757,37 +1994,59 @@ public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExch assert idx >= 0 && idx < exchFuts.size() - 1 : "Invalid exchange futures state [cur=" + idx + ", total=" + exchFuts.size() + ']'; - final GridDhtPartitionsExchangeFuture prev = exchFuts.get(idx + 1); + GridDhtPartitionsExchangeFuture futureToFetchAffinity = null; + + for (int i = idx + 1; i < exchFuts.size(); i++) { + GridDhtPartitionsExchangeFuture prev = exchFuts.get(i); + + assert prev.isDone() && prev.topologyVersion().compareTo(topVer) < 0; + + if (prev.isMerged()) + continue; - assert prev.isDone() && prev.topologyVersion().compareTo(topVer) < 0 : prev; + futureToFetchAffinity = prev; + + break; + } + + if (futureToFetchAffinity == null) + throw new IgniteCheckedException("Failed to find completed exchange future to fetch affinity."); if (log.isDebugEnabled()) { log.debug("Need initialize affinity on coordinator [" + "cacheGrp=" + desc.cacheOrGroupName() + - "prevAff=" + prev.topologyVersion() + ']'); + "prevAff=" + futureToFetchAffinity.topologyVersion() + ']'); } - GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture(cctx, + GridDhtAssignmentFetchFuture fetchFut = new GridDhtAssignmentFetchFuture( + cctx, desc.groupId(), - prev.topologyVersion(), - prev.events().discoveryCache()); + futureToFetchAffinity.topologyVersion(), + 
futureToFetchAffinity.events().discoveryCache() + ); fetchFut.init(false); final GridFutureAdapter affFut = new GridFutureAdapter<>(); + final GridDhtPartitionsExchangeFuture futureToFetchAffinity0 = futureToFetchAffinity; + fetchFut.listen(new IgniteInClosureX>() { @Override public void applyx(IgniteInternalFuture fetchFut) throws IgniteCheckedException { - fetchAffinity(prev.topologyVersion(), - prev.events(), - prev.events().discoveryCache(), + fetchAffinity( + futureToFetchAffinity0.topologyVersion(), + futureToFetchAffinity0.events(), + futureToFetchAffinity0.events().discoveryCache(), aff, - (GridDhtAssignmentFetchFuture)fetchFut); + (GridDhtAssignmentFetchFuture)fetchFut + ); aff.calculate(topVer, fut.events(), fut.events().discoveryCache()); affFut.onDone(topVer); + + cctx.exchange().exchangerUpdateHeartbeat(); } }); @@ -1795,7 +2054,7 @@ public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExch } } else { - grpHolder = new CacheGroupHolder1(grp, null); + grpHolder = new CacheGroupAffNodeHolder(grp); if (newAff) { GridAffinityAssignmentCache aff = grpHolder.affinity(); @@ -1807,9 +2066,12 @@ public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExch } } - CacheGroupHolder old = grpHolders.put(grpHolder.groupId(), grpHolder); + grpHolders.put(grpHolder.groupId(), grpHolder); + + cctx.exchange().exchangerUpdateHeartbeat(); - assert old == null : old; + fut.timeBag().finishLocalStage("Coordinator affinity cache init " + + "[grp=" + desc.cacheOrGroupName() + "]"); } }); @@ -1833,28 +2095,41 @@ public IgniteInternalFuture initCoordinatorCaches(final GridDhtPartitionsExch * @return Cache holder. * @throws IgniteCheckedException If failed. 
*/ - private CacheGroupHolder groupHolder(AffinityTopologyVersion topVer, final CacheGroupDescriptor desc) + private CacheGroupHolder getOrCreateGroupHolder(AffinityTopologyVersion topVer, CacheGroupDescriptor desc) throws IgniteCheckedException { CacheGroupHolder cacheGrp = grpHolders.get(desc.groupId()); if (cacheGrp != null) return cacheGrp; - final CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId()); + return createGroupHolder(topVer, desc, cctx.cache().cacheGroup(desc.groupId()) != null); + } - if (grp == null) { - cctx.io().addCacheGroupHandler(desc.groupId(), GridDhtAffinityAssignmentResponse.class, - new IgniteBiInClosure() { - @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentResponse res) { - processAffinityAssignmentResponse(nodeId, res); - } - } - ); + /** + * @param topVer Topology version. + * @param desc Cache descriptor. + * @param affNode Affinity node flag. + * @return Cache holder. + * @throws IgniteCheckedException If failed. + */ + private CacheGroupHolder createGroupHolder( + AffinityTopologyVersion topVer, + CacheGroupDescriptor desc, + boolean affNode + ) throws IgniteCheckedException { + assert topVer != null; + assert desc != null; - cacheGrp = CacheGroupHolder2.create(cctx, desc, topVer, null); - } - else - cacheGrp = new CacheGroupHolder1(grp, null); + CacheGroupContext grp = cctx.cache().cacheGroup(desc.groupId()); + + cctx.io().addCacheGroupHandler(desc.groupId(), GridDhtAffinityAssignmentResponse.class, + (IgniteBiInClosure)this::processAffinityAssignmentResponse); + + assert (affNode && grp != null) || (!affNode && grp == null); + + CacheGroupHolder cacheGrp = affNode ? + new CacheGroupAffNodeHolder(grp) : + createHolder(cctx, desc, topVer, null); CacheGroupHolder old = grpHolders.put(desc.groupId(), cacheGrp); @@ -1866,75 +2141,66 @@ private CacheGroupHolder groupHolder(AffinityTopologyVersion topVer, final Cache /** * @param fut Current exchange future. * @param crd Coordinator flag. 
- * @throws IgniteCheckedException If failed. * @return Rabalance info. */ - @Nullable private WaitRebalanceInfo initAffinityOnNodeJoin(final GridDhtPartitionsExchangeFuture fut, boolean crd) - throws IgniteCheckedException { + @Nullable private WaitRebalanceInfo initAffinityOnNodeJoin(final GridDhtPartitionsExchangeFuture fut, boolean crd) { final ExchangeDiscoveryEvents evts = fut.context().events(); - final Map>> affCache = new HashMap<>(); + final WaitRebalanceInfo waitRebalanceInfo = new WaitRebalanceInfo(evts.lastServerEventVersion()); - if (!crd) { - for (CacheGroupContext grp : cctx.cache().cacheGroups()) { - if (grp.isLocal()) - continue; + forAllRegisteredCacheGroups(new IgniteInClosureX() { + @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { + if (cctx.localNode().isClient() && cctx.cache().cacheGroup(desc.groupId()) == null) + return; - boolean latePrimary = grp.rebalanceEnabled(); + CacheGroupHolder grpHolder = getOrCreateGroupHolder(evts.topologyVersion(), desc); - initAffinityOnNodeJoin(evts, - evts.nodeJoined(grp.receivedFrom()), - grp.affinity(), - null, - latePrimary, - affCache); - } + // Already calculated. + if (grpHolder.affinity().lastVersion().equals(evts.topologyVersion())) + return; - return null; - } - else { - final WaitRebalanceInfo waitRebalanceInfo = new WaitRebalanceInfo(evts.lastServerEventVersion()); + boolean latePrimary = grpHolder.rebalanceEnabled; - forAllRegisteredCacheGroups(new IgniteInClosureX() { - @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - CacheGroupHolder cache = groupHolder(evts.topologyVersion(), desc); + boolean grpAdded = evts.nodeJoined(desc.receivedFrom()); - boolean latePrimary = cache.rebalanceEnabled; + initAffinityOnNodeJoin(evts, + grpAdded, + grpHolder.affinity(), + crd ? 
waitRebalanceInfo : null, + latePrimary); - boolean grpAdded = evts.nodeJoined(desc.receivedFrom()); + if (crd && grpAdded) { + AffinityAssignment aff = grpHolder.aff.cachedAffinity(grpHolder.aff.lastVersion()); - initAffinityOnNodeJoin(evts, - grpAdded, - cache.affinity(), - waitRebalanceInfo, - latePrimary, - affCache); + assert evts.topologyVersion().equals(aff.topologyVersion()) : "Unexpected version [" + + "grp=" + grpHolder.aff.cacheOrGroupName() + + ", evts=" + evts.topologyVersion() + + ", aff=" + grpHolder.aff.lastVersion() + ']'; - if (grpAdded) { - AffinityAssignment aff = cache.aff.cachedAffinity(cache.aff.lastVersion()); + Map map = affinityFullMap(aff); - assert evts.topologyVersion().equals(aff.topologyVersion()) : "Unexpected version [" + - "grp=" + cache.aff.cacheOrGroupName() + - ", evts=" + evts.topologyVersion() + - ", aff=" + cache.aff.lastVersion() + ']'; + for (GridDhtPartitionMap map0 : map.values()) + grpHolder.topology(fut.context().events().discoveryCache()).update(fut.exchangeId(), map0, true); + } - Map map = affinityFullMap(aff); + cctx.exchange().exchangerUpdateHeartbeat(); - for (GridDhtPartitionMap map0 : map.values()) - cache.topology(fut.context().events().discoveryCache()).update(fut.exchangeId(), map0, true); - } - } - }); + fut.timeBag().finishLocalStage("Affinity initialization (node join) " + + "[grp=" + desc.cacheOrGroupName() + ", crd=" + crd + "]"); + } + }); - return waitRebalanceInfo; - } + return waitRebalanceInfo; } + /** + * @param aff Affinity assignment. + */ private Map affinityFullMap(AffinityAssignment aff) { Map map = new HashMap<>(); for (int p = 0; p < aff.assignment().size(); p++) { - HashSet ids = aff.getIds(p); + Collection ids = aff.getIds(p); for (UUID nodeId : ids) { GridDhtPartitionMap partMap = map.get(nodeId); @@ -1962,15 +2228,13 @@ private Map affinityFullMap(AffinityAssignment aff) { * @param aff Affinity. * @param rebalanceInfo Rebalance information. 
* @param latePrimary If {@code true} delays primary assignment if it is not owner. - * @param affCache Already calculated assignments (to reduce data stored in history). */ private void initAffinityOnNodeJoin( ExchangeDiscoveryEvents evts, boolean addedOnExchnage, GridAffinityAssignmentCache aff, WaitRebalanceInfo rebalanceInfo, - boolean latePrimary, - Map>> affCache + boolean latePrimary ) { if (addedOnExchnage) { if (!aff.lastVersion().equals(evts.topologyVersion())) @@ -1996,8 +2260,8 @@ private void initAffinityOnNodeJoin( List newNodes = idealAssignment.get(p); List curNodes = curAff.get(p); - ClusterNode curPrimary = curNodes.size() > 0 ? curNodes.get(0) : null; - ClusterNode newPrimary = newNodes.size() > 0 ? newNodes.get(0) : null; + ClusterNode curPrimary = !curNodes.isEmpty() ? curNodes.get(0) : null; + ClusterNode newPrimary = !newNodes.isEmpty() ? newNodes.get(0) : null; if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) { assert cctx.discovery().node(evts.topologyVersion(), curPrimary.id()) != null : curPrimary; @@ -2019,26 +2283,7 @@ private void initAffinityOnNodeJoin( if (newAssignment == null) newAssignment = idealAssignment; - aff.initialize(evts.topologyVersion(), cachedAssignment(aff, newAssignment, affCache)); - } - - /** - * @param aff Assignment cache. - * @param assign Assignment. - * @param affCache Assignments already calculated for other caches. - * @return Assignment. 
- */ - private List> cachedAssignment(GridAffinityAssignmentCache aff, - List> assign, - Map>> affCache) { - List> assign0 = affCache.get(aff.similarAffinityKey()); - - if (assign0 != null && assign0.equals(assign)) - assign = assign0; - else - affCache.put(aff.similarAffinityKey(), assign); - - return assign; + aff.initialize(evts.topologyVersion(), newAssignment); } /** @@ -2095,7 +2340,7 @@ public IgniteInternalFuture>>> initAffinity try { resFut.onDone(initAffinityBasedOnPartitionsAvailability(fut.initialVersion(), fut, NODE_TO_ID, false)); } - catch (IgniteCheckedException e) { + catch (Exception e) { resFut.onDone(e); } } @@ -2108,21 +2353,21 @@ public IgniteInternalFuture>>> initAffinity } /** - * Initializes current affinity assignment based on partitions availability. - * Nodes that have most recent data will be considered affinity nodes. + * Initializes current affinity assignment based on partitions availability. Nodes that have most recent data will + * be considered affinity nodes. * * @param topVer Topology version. * @param fut Exchange future. * @param c Closure converting affinity diff. * @param initAff {@code True} if need initialize affinity. - * @return Affinity assignment. - * @throws IgniteCheckedException If failed. + * @return Affinity assignment for each of registered cache group. 
*/ - private Map>> initAffinityBasedOnPartitionsAvailability(final AffinityTopologyVersion topVer, + private Map>> initAffinityBasedOnPartitionsAvailability( + final AffinityTopologyVersion topVer, final GridDhtPartitionsExchangeFuture fut, final IgniteClosure c, - final boolean initAff) - throws IgniteCheckedException { + final boolean initAff + ) { final boolean enforcedCentralizedAssignment = DiscoveryCustomEvent.requiresCentralizedAffinityAssignment(fut.firstEvent()); @@ -2132,11 +2377,11 @@ private Map>> initAffinityBasedOnPartitionsAva final Collection aliveNodes = fut.context().events().discoveryCache().serverNodes(); - final Map>> assignment = new HashMap<>(); + final Map>> assignment = new ConcurrentHashMap<>(); forAllRegisteredCacheGroups(new IgniteInClosureX() { @Override public void applyx(CacheGroupDescriptor desc) throws IgniteCheckedException { - CacheGroupHolder grpHolder = groupHolder(topVer, desc); + CacheGroupHolder grpHolder = getOrCreateGroupHolder(topVer, desc); if (!grpHolder.rebalanceEnabled || (fut.cacheGroupAddedOnExchange(desc.groupId(), desc.receivedFrom()) && !enforcedCentralizedAssignment)) @@ -2167,8 +2412,8 @@ private Map>> initAffinityBasedOnPartitionsAva ", topVer=" + fut.context().events().discoveryCache().version() + ", evts=" + fut.context().events().events() + "]"; - ClusterNode curPrimary = curNodes.size() > 0 ? curNodes.get(0) : null; - ClusterNode newPrimary = newNodes.size() > 0 ? newNodes.get(0) : null; + ClusterNode curPrimary = !curNodes.isEmpty() ? curNodes.get(0) : null; + ClusterNode newPrimary = !newNodes.isEmpty() ? newNodes.get(0) : null; List newNodes0 = null; @@ -2177,16 +2422,25 @@ private Map>> initAffinityBasedOnPartitionsAva ", node=" + newPrimary + ", topVer=" + topVer + ']'; - List owners = top.owners(p); + List owners = top.owners(p, topVer); // It is essential that curPrimary node has partition in OWNING state. 
if (!owners.isEmpty() && !owners.contains(curPrimary)) curPrimary = owners.get(0); - if (curPrimary != null && newPrimary != null && !curPrimary.equals(newPrimary)) { - if (aliveNodes.contains(curPrimary)) { - GridDhtPartitionState state = top.partitionState(newPrimary.id(), p); + // If new assignment is empty preserve current ownership for alive nodes. + if (curPrimary != null && newPrimary == null) { + newNodes0 = new ArrayList<>(curNodes.size()); + for (ClusterNode node : curNodes) { + if (aliveNodes.contains(node)) + newNodes0.add(node); + } + } + else if (curPrimary != null && !curPrimary.equals(newPrimary)) { + GridDhtPartitionState state = top.partitionState(newPrimary.id(), p); + + if (aliveNodes.contains(curPrimary)) { if (state != GridDhtPartitionState.OWNING) { newNodes0 = latePrimaryAssignment(grpHolder.affinity(), p, @@ -2196,8 +2450,6 @@ private Map>> initAffinityBasedOnPartitionsAva } } else { - GridDhtPartitionState state = top.partitionState(newPrimary.id(), p); - if (state != GridDhtPartitionState.OWNING) { for (int i = 1; i < curNodes.size(); i++) { ClusterNode curNode = curNodes.get(i); @@ -2257,6 +2509,9 @@ private Map>> initAffinityBasedOnPartitionsAva if (initAff) grpHolder.affinity().initialize(topVer, newAssignment0); + + fut.timeBag().finishLocalStage("Affinity recalculation (partitions availability) " + + "[grp=" + desc.cacheOrGroupName() + "]"); } }); @@ -2278,7 +2533,24 @@ private Map>> initAffinityBasedOnPartitionsAva * @return All registered cache groups. */ public Map cacheGroups() { - return caches.registeredGrps; + return cachesRegistry.allGroups(); + } + + /** + * @return All registered cache groups. + */ + public Map caches() { + return cachesRegistry.allCaches(); + } + + /** + * @param grpId Cache group ID + * @return Cache affinity cache. + */ + @Nullable public GridAffinityAssignmentCache groupAffinity(int grpId) { + CacheGroupHolder grpHolder = grpHolders.get(grpId); + + return grpHolder != null ? 
grpHolder.affinity() : null; } /** @@ -2333,7 +2605,7 @@ private List toNodes(AffinityTopologyVersion topVer, List ids /** * */ - abstract static class CacheGroupHolder { + abstract class CacheGroupHolder { /** */ private final GridAffinityAssignmentCache aff; @@ -2359,7 +2631,7 @@ abstract static class CacheGroupHolder { /** * @return Client holder flag. */ - abstract boolean client(); + abstract boolean nonAffNode(); /** * @return Group ID. @@ -2392,15 +2664,22 @@ GridAffinityAssignmentCache affinity() { /** * Created cache is started on coordinator. */ - private class CacheGroupHolder1 extends CacheGroupHolder { + private class CacheGroupAffNodeHolder extends CacheGroupHolder { /** */ private final CacheGroupContext grp; + /** + * @param grp Cache group. + */ + CacheGroupAffNodeHolder(CacheGroupContext grp) { + this(grp, null); + } + /** * @param grp Cache group. * @param initAff Current affinity. */ - CacheGroupHolder1(CacheGroupContext grp, @Nullable GridAffinityAssignmentCache initAff) { + CacheGroupAffNodeHolder(CacheGroupContext grp, @Nullable GridAffinityAssignmentCache initAff) { super(grp.rebalanceEnabled(), grp.affinity(), initAff); assert !grp.isLocal() : grp; @@ -2409,7 +2688,7 @@ private class CacheGroupHolder1 extends CacheGroupHolder { } /** {@inheritDoc} */ - @Override public boolean client() { + @Override public boolean nonAffNode() { return false; } @@ -2422,10 +2701,42 @@ private class CacheGroupHolder1 extends CacheGroupHolder { /** * Created if cache is not started on coordinator. */ - private static class CacheGroupHolder2 extends CacheGroupHolder { + private class CacheGroupNoAffOrFiltredHolder extends CacheGroupHolder { /** */ private final GridCacheSharedContext cctx; + /** + * @param rebalanceEnabled Rebalance flag. + * @param cctx Context. + * @param aff Affinity. + * @param initAff Current affinity. 
+ */ + CacheGroupNoAffOrFiltredHolder( + boolean rebalanceEnabled, + GridCacheSharedContext cctx, + GridAffinityAssignmentCache aff, + @Nullable GridAffinityAssignmentCache initAff + ) { + super(rebalanceEnabled, aff, initAff); + + this.cctx = cctx; + } + + /** + * @param cctx Context. + * @param grpDesc Cache group descriptor. + * @param topVer Current exchange version. + * @return Cache holder. + * @throws IgniteCheckedException If failed. + */ + CacheGroupNoAffOrFiltredHolder create( + GridCacheSharedContext cctx, + CacheGroupDescriptor grpDesc, + AffinityTopologyVersion topVer + ) throws IgniteCheckedException { + return create(cctx, grpDesc, topVer, null); + } + /** * @param cctx Context. * @param grpDesc Cache group descriptor. @@ -2434,13 +2745,14 @@ private static class CacheGroupHolder2 extends CacheGroupHolder { * @return Cache holder. * @throws IgniteCheckedException If failed. */ - static CacheGroupHolder2 create( + CacheGroupNoAffOrFiltredHolder create( GridCacheSharedContext cctx, CacheGroupDescriptor grpDesc, AffinityTopologyVersion topVer, - @Nullable GridAffinityAssignmentCache initAff) throws IgniteCheckedException { + @Nullable GridAffinityAssignmentCache initAff + ) throws IgniteCheckedException { assert grpDesc != null; - assert !cctx.kernalContext().clientNode(); + assert !cctx.kernalContext().clientNode() || !CU.affinityNode(cctx.localNode(), grpDesc.config().getNodeFilter()); CacheConfiguration ccfg = grpDesc.config(); @@ -2466,27 +2778,11 @@ static CacheGroupHolder2 create( ccfg.getCacheMode() == LOCAL, grpDesc.persistenceEnabled()); - return new CacheGroupHolder2(ccfg.getRebalanceMode() != NONE, cctx, aff, initAff); - } - - /** - * @param rebalanceEnabled Rebalance flag. - * @param cctx Context. - * @param aff Affinity. - * @param initAff Current affinity. 
- */ - CacheGroupHolder2( - boolean rebalanceEnabled, - GridCacheSharedContext cctx, - GridAffinityAssignmentCache aff, - @Nullable GridAffinityAssignmentCache initAff) { - super(rebalanceEnabled, aff, initAff); - - this.cctx = cctx; + return new CacheGroupNoAffOrFiltredHolder(ccfg.getRebalanceMode() != NONE, cctx, aff, initAff); } /** {@inheritDoc} */ - @Override public boolean client() { + @Override public boolean nonAffNode() { return true; } @@ -2496,21 +2792,58 @@ static CacheGroupHolder2 create( } } + private CacheGroupNoAffOrFiltredHolder createHolder( + GridCacheSharedContext cctx, + CacheGroupDescriptor grpDesc, + AffinityTopologyVersion topVer, + @Nullable GridAffinityAssignmentCache initAff + ) throws IgniteCheckedException { + assert grpDesc != null; + assert !cctx.kernalContext().clientNode() || !CU.affinityNode(cctx.localNode(), grpDesc.config().getNodeFilter()); + + CacheConfiguration ccfg = grpDesc.config(); + + assert ccfg != null : grpDesc; + assert ccfg.getCacheMode() != LOCAL : ccfg.getName(); + + assert !cctx.discovery().cacheGroupAffinityNodes(grpDesc.groupId(), + topVer).contains(cctx.localNode()) : grpDesc.cacheOrGroupName(); + + AffinityFunction affFunc = cctx.cache().clone(ccfg.getAffinity()); + + cctx.kernalContext().resource().injectGeneric(affFunc); + cctx.kernalContext().resource().injectCacheName(affFunc, ccfg.getName()); + + U.startLifecycleAware(F.asList(affFunc)); + + GridAffinityAssignmentCache aff = new GridAffinityAssignmentCache(cctx.kernalContext(), + grpDesc.cacheOrGroupName(), + grpDesc.groupId(), + affFunc, + ccfg.getNodeFilter(), + ccfg.getBackups(), + ccfg.getCacheMode() == LOCAL, + grpDesc.persistenceEnabled()); + + return new CacheGroupNoAffOrFiltredHolder(ccfg.getRebalanceMode() != NONE, cctx, aff, initAff); + } + /** - * + * Tracks rebalance state on coordinator. + * After all partitions are rebalanced the current affinity is switched to ideal. 
*/ class WaitRebalanceInfo { /** */ private final AffinityTopologyVersion topVer; /** */ - private Map> waitGrps; + private final Map> waitGrps = new ConcurrentHashMap<>(); /** */ - private Map>> assignments; + private final Map>> assignments = new ConcurrentHashMap<>(); /** */ - private Map deploymentIds; + private final Map deploymentIds = new ConcurrentHashMap<>(); /** * @param topVer Topology version. @@ -2523,47 +2856,32 @@ class WaitRebalanceInfo { * @return {@code True} if there are partitions waiting for rebalancing. */ boolean empty() { - if (waitGrps != null) { - assert !waitGrps.isEmpty(); + boolean isEmpty = waitGrps.isEmpty(); + + if (!isEmpty) { assert waitGrps.size() == assignments.size(); return false; } - return true; + return isEmpty; } /** + * Adds a partition to wait set. + * * @param grpId Group ID. * @param part Partition. * @param waitNode Node rebalancing data. * @param assignment New assignment. */ - void add(Integer grpId, Integer part, UUID waitNode, List assignment) { - assert !F.isEmpty(assignment) : assignment; - - if (waitGrps == null) { - waitGrps = new HashMap<>(); - assignments = new HashMap<>(); - deploymentIds = new HashMap<>(); - } - - Map cacheWaitParts = waitGrps.get(grpId); - - if (cacheWaitParts == null) { - waitGrps.put(grpId, cacheWaitParts = new HashMap<>()); + void add(Integer grpId, Integer part, UUID waitNode, @Nullable List assignment) { + deploymentIds.putIfAbsent(grpId, cachesRegistry.group(grpId).deploymentId()); - deploymentIds.put(grpId, caches.group(grpId).deploymentId()); - } - - cacheWaitParts.put(part, waitNode); - - Map> cacheAssignment = assignments.get(grpId); + waitGrps.computeIfAbsent(grpId, k -> new HashMap<>()).put(part, waitNode); - if (cacheAssignment == null) - assignments.put(grpId, cacheAssignment = new HashMap<>()); - - cacheAssignment.put(part, assignment); + if (assignment != null) + assignments.computeIfAbsent(grpId, k -> new HashMap<>()).put(part, assignment); } /** {@inheritDoc} */ @@ 
-2572,138 +2890,4 @@ void add(Integer grpId, Integer part, UUID waitNode, List assignmen ", grps=" + (waitGrps != null ? waitGrps.keySet() : null) + ']'; } } - - /** - * - */ - class CachesInfo { - /** Registered cache groups (updated from exchange thread). */ - private final ConcurrentHashMap registeredGrps = new ConcurrentHashMap<>(); - - /** Registered caches (updated from exchange thread). */ - private final ConcurrentHashMap registeredCaches = new ConcurrentHashMap<>(); - - /** - * @param grps Registered groups. - * @param caches Registered caches. - */ - void init(Map grps, Map caches) { - for (CacheGroupDescriptor grpDesc : grps.values()) - registerGroup(grpDesc); - - for (DynamicCacheDescriptor cacheDesc : caches.values()) - registerCache(cacheDesc); - } - - - /** - * @param desc Description. - */ - private DynamicCacheDescriptor registerCache(DynamicCacheDescriptor desc) { - saveCacheConfiguration(desc.cacheConfiguration(), desc.sql()); - - return registeredCaches.put(desc.cacheId(), desc); - } - - /** - * @param grpDesc Group description. - */ - private CacheGroupDescriptor registerGroup(CacheGroupDescriptor grpDesc) { - return registeredGrps.put(grpDesc.groupId(), grpDesc); - } - - /** - * @return All registered groups. - */ - Collection allGroups() { - return registeredGrps.values(); - } - - /** - * @param grpId Group ID. - * @return Group descriptor. - */ - CacheGroupDescriptor group(int grpId) { - CacheGroupDescriptor desc = registeredGrps.get(grpId); - - assert desc != null : grpId; - - return desc; - } - - /** - * @param descs Cache descriptor. - */ - void initStartedCaches(Collection descs) { - for (DynamicCacheDescriptor desc : descs) { - CacheGroupDescriptor grpDesc = desc.groupDescriptor(); - - if (!registeredGrps.containsKey(grpDesc.groupId())) - registerGroup(grpDesc); - - if (!registeredCaches.containsKey(desc.cacheId())) - registerCache(desc); - } - } - - /** - * @param exchActions Exchange actions. 
- */ - void updateCachesInfo(ExchangeActions exchActions) { - for (ExchangeActions.CacheGroupActionData stopAction : exchActions.cacheGroupsToStop()) { - CacheGroupDescriptor rmvd = registeredGrps.remove(stopAction.descriptor().groupId()); - - assert rmvd != null : stopAction.descriptor().cacheOrGroupName(); - } - - for (ExchangeActions.CacheGroupActionData startAction : exchActions.cacheGroupsToStart()) { - CacheGroupDescriptor old = registerGroup(startAction.descriptor()); - - assert old == null : old; - } - - for (ExchangeActions.CacheActionData req : exchActions.cacheStopRequests()) - registeredCaches.remove(req.descriptor().cacheId()); - - for (ExchangeActions.CacheActionData req : exchActions.cacheStartRequests()) - registerCache(req.descriptor()); - } - - /** - * @param cacheId Cache ID. - * @return Cache descriptor if cache found. - */ - @Nullable DynamicCacheDescriptor cache(Integer cacheId) { - return registeredCaches.get(cacheId); - } - - /** - * - */ - void clear() { - registeredGrps.clear(); - - registeredCaches.clear(); - } - } - - /** - * @param cfg cache configuration - * @param sql SQL flag. 
- */ - private void saveCacheConfiguration(CacheConfiguration cfg, boolean sql) { - if (cctx.pageStore() != null && CU.isPersistentCache(cfg, cctx.gridConfig().getDataStorageConfiguration()) && - !cctx.kernalContext().clientNode()) { - try { - StoredCacheData data = new StoredCacheData(cfg); - - data.sql(sql); - - cctx.pageStore().storeCacheData(data, false); - } - catch (IgniteCheckedException e) { - U.error(log(), "Error while saving cache configuration on disk, cfg = " + cfg, e); - } - } - } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClassLoaderMarker.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClassLoaderMarker.java new file mode 100644 index 0000000000000..b575ec797ed4c --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClassLoaderMarker.java @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.processors.cache; + +/** + * It's a marker interface for detecting GridCacheDeploymentManager$CacheClassLoader. + */ +public interface CacheClassLoaderMarker { + // Marker interface. 
+} \ No newline at end of file diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java index ce6416fa0c766..8935a98ea6f69 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheClusterMetricsMXBeanImpl.java @@ -114,6 +114,11 @@ class CacheClusterMetricsMXBeanImpl implements CacheMetricsMXBean { return cache.clusterMetrics().getSize(); } + /** {@inheritDoc} */ + @Override public long getCacheSize() { + return cache.clusterMetrics().getCacheSize(); + } + /** {@inheritDoc} */ @Override public int getKeySize() { return cache.clusterMetrics().getKeySize(); @@ -246,7 +251,12 @@ class CacheClusterMetricsMXBeanImpl implements CacheMetricsMXBean { /** {@inheritDoc} */ @Override public void clear() { - throw new UnsupportedOperationException("Cluster metrics can't be cleared. 
Use local metrics clear instead."); + try { + cache.context().shared().cache().clearStatistics(Collections.singleton(cache.name())); + } + catch (Exception e) { + throw new RuntimeException(e.getMessage()); + } } /** {@inheritDoc} */ @@ -364,6 +374,16 @@ class CacheClusterMetricsMXBeanImpl implements CacheMetricsMXBean { return cache.clusterMetrics().getTotalPartitionsCount(); } + /** {@inheritDoc} */ + @Override public long getRebalancedKeys() { + return cache.clusterMetrics().getRebalancedKeys(); + } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingKeys() { + return cache.clusterMetrics().getEstimatedRebalancingKeys(); + } + /** {@inheritDoc} */ @Override public int getRebalancingPartitionsCount() { return cache.clusterMetrics().getRebalancingPartitionsCount(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheConflictResolutionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheConflictResolutionManager.java index 6d65d828fc298..9790f754a8d5a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheConflictResolutionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheConflictResolutionManager.java @@ -17,7 +17,7 @@ package org.apache.ignite.internal.processors.cache; -import org.apache.ignite.internal.processors.cache.version.*; +import org.apache.ignite.internal.processors.cache.version.CacheVersionConflictResolver; /** * Conflict resolver manager. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDefaultBinaryAffinityKeyMapper.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDefaultBinaryAffinityKeyMapper.java index 43506873fb427..385ed59cfd36a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDefaultBinaryAffinityKeyMapper.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDefaultBinaryAffinityKeyMapper.java @@ -63,7 +63,7 @@ public CacheDefaultBinaryAffinityKeyMapper(@Nullable CacheKeyConfiguration[] cac /** {@inheritDoc} */ @Override public Object affinityKey(Object key) { try { - key = proc.toBinary(key); + key = proc.toBinary(key, false); } catch (IgniteException e) { U.error(log, "Failed to marshal key to binary: " + key, e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDiagnosticManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDiagnosticManager.java new file mode 100644 index 0000000000000..12bf89da89615 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheDiagnosticManager.java @@ -0,0 +1,135 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import javax.management.InstanceNotFoundException; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.configuration.IgniteConfiguration; +import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTrackerMXBean; +import org.apache.ignite.internal.processors.cache.persistence.diagnostic.pagelocktracker.PageLockTrackerManager; +import org.apache.ignite.internal.util.typedef.internal.U; + +/** + * Component for manage all cache diagnostic functionality. + */ +public class CacheDiagnosticManager extends GridCacheSharedManagerAdapter { + /** Diagnostic mxbeans group name. */ + public static final String MBEAN_GROUP = "Diagnostic"; + + /** Page lock tracker manager */ + private PageLockTrackerManager pageLockTrackerManager; + + /** {@inheritDoc} */ + @Override protected void start0() throws IgniteCheckedException { + super.start0(); + + String name = cctx.kernalContext().pdsFolderResolver().resolveFolders().consistentId().toString(); + + pageLockTrackerManager = new PageLockTrackerManager(log, name); + + pageLockTrackerManager.start(); + + registerMetricsMBean( + cctx.gridConfig(), + MBEAN_GROUP, + PageLockTrackerMXBean.MBEAN_NAME, + pageLockTrackerManager.mxBean(), + PageLockTrackerMXBean.class + ); + } + + /** {@inheritDoc} */ + @Override protected void stop0(boolean cancel) { + super.stop0(cancel); + + unregisterMetricsMBean(cctx.gridConfig(), MBEAN_GROUP, PageLockTrackerMXBean.MBEAN_NAME); + + pageLockTrackerManager.stop(); + } + + /** + * Getter. + * + * @return Page lock tracker mananger. + */ + public PageLockTrackerManager pageLockTracker() { + return pageLockTrackerManager; + } + + /** + * @param cfg Ignite configuration. + * @param groupName Name of group. + * @param mbeanName Metrics MBean name. 
+ * @param impl Metrics implementation. + * @param clazz Metrics class type. + */ + protected void registerMetricsMBean( + IgniteConfiguration cfg, + String groupName, + String mbeanName, + T impl, + Class clazz + ) { + if (U.IGNITE_MBEANS_DISABLED) + return; + + try { + U.registerMBean( + cfg.getMBeanServer(), + cfg.getIgniteInstanceName(), + groupName, + mbeanName, + impl, + clazz); + } + catch (Throwable e) { + U.error(log, "Failed to register MBean with name: " + mbeanName, e); + } + } + + /** + * @param cfg Ignite configuration. + * @param groupName Name of group. + * @param name Name of MBean. + */ + protected void unregisterMetricsMBean( + IgniteConfiguration cfg, + String groupName, + String name + ) { + if (U.IGNITE_MBEANS_DISABLED) + return; + + assert cfg != null; + + try { + cfg.getMBeanServer().unregisterMBean( + U.makeMBeanName( + cfg.getIgniteInstanceName(), + groupName, + name + )); + } + catch (InstanceNotFoundException ignored) { + // We tried to unregister a non-existing MBean, not a big deal. 
+ } + catch (Throwable e) { + U.error(log, "Failed to unregister MBean for memory metrics: " + name, e); + } + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java index 49f77fa808dba..c21f5b6099722 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryInfoCollection.java @@ -116,4 +116,23 @@ public void add(GridCacheEntryInfo info) { @Override public byte fieldsCount() { return 1; } -} \ No newline at end of file + + /** {@inheritDoc} */ + @Override public String toString() { + StringBuilder b = new StringBuilder(); + b.append("["); + + for (int i = 0; i < infos().size(); i++) { + GridCacheEntryInfo info = infos().get(i); + + Object k = info.key().value(null, false); + + b.append("[key=").append(k == null ? "null" : k).append(", ver="). 
+ append(info.version()).append(", val=").append(info.value()).append(']'); + } + + b.append(']'); + + return b.toString(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicate.java index 61cbb9e04fb54..36312a1591135 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicate.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicate.java @@ -42,4 +42,4 @@ public interface CacheEntryPredicate extends IgnitePredicate, * @param locked Entry locked */ public void entryLocked(boolean locked); -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateAdapter.java index e41938997daa2..62325323e1d3e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateAdapter.java @@ -97,4 +97,4 @@ public abstract class CacheEntryPredicateAdapter implements CacheEntryPredicate @Override public void onAckReceived() { // No-op. 
} -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateContainsValue.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateContainsValue.java index 76806a44f5cfd..b5fde216f7708 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateContainsValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateContainsValue.java @@ -19,6 +19,7 @@ import java.nio.ByteBuffer; import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.binary.BinaryObject; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -58,8 +59,17 @@ public CacheEntryPredicateContainsValue(CacheObject val) { @Override public boolean apply(GridCacheEntryEx e) { CacheObject val = peekVisibleValue(e); + if (this.val == null && val == null) + return true; + + if (this.val == null || val == null) + return false; + GridCacheContext cctx = e.context(); + if (this.val instanceof BinaryObject && val instanceof BinaryObject) + return F.eq(val, this.val); + Object thisVal = CU.value(this.val, cctx, false); Object cacheVal = CU.value(val, cctx, false); @@ -140,4 +150,4 @@ public CacheEntryPredicateContainsValue(CacheObject val) { @Override public String toString() { return S.toString(CacheEntryPredicateContainsValue.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateHasValue.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateHasValue.java index cac04357a5816..210cc7059b21d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateHasValue.java +++ 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateHasValue.java @@ -28,4 +28,4 @@ public class CacheEntryPredicateHasValue extends CacheEntryPredicateAdapter { @Override public boolean apply(GridCacheEntryEx e) { return peekVisibleValue(e) != null; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateNoValue.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateNoValue.java index 2790170e959e8..4c8917fcc68be 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateNoValue.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntryPredicateNoValue.java @@ -28,4 +28,4 @@ public class CacheEntryPredicateNoValue extends CacheEntryPredicateAdapter { @Override public boolean apply(GridCacheEntryEx e) { return peekVisibleValue(e) == null; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntrySerializablePredicate.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntrySerializablePredicate.java index 9057e41fbfbfc..257433636e6b4 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntrySerializablePredicate.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEntrySerializablePredicate.java @@ -156,4 +156,4 @@ public CacheEntryPredicate predicate() { @Override public byte fieldsCount() { return 1; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionEntry.java index 2717b1e03e22e..96b85df2f0721 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionEntry.java @@ -185,4 +185,4 @@ public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws Ignite @Override public byte fieldsCount() { return 3; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionManager.java index b614728f25946..2a9a0e8b10d76 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheEvictionManager.java @@ -19,7 +19,6 @@ import java.util.Collection; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.jetbrains.annotations.Nullable; @@ -35,10 +34,9 @@ public interface CacheEvictionManager extends GridCacheManager { public void touch(IgniteTxEntry txEntry, boolean loc); /** - * @param e Entry for eviction policy notification. - * @param topVer Topology version. + * @param e Entry for eviction policy notification. */ - public void touch(GridCacheEntryEx e, AffinityTopologyVersion topVer); + public void touch(GridCacheEntryEx e); /** * @param entry Entry to attempt to evict. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java index 12636f3bd0ef5..2ccf405c30480 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupContext.java @@ -20,10 +20,11 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; -import java.util.Iterator; +import java.util.Collections; import java.util.List; import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.AtomicBoolean; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import org.apache.ignite.cache.affinity.AffinityFunction; @@ -39,16 +40,20 @@ import org.apache.ignite.internal.processors.affinity.GridAffinityAssignmentCache; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionsEvictor; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopologyImpl; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPreloader; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopologyImpl; import 
org.apache.ignite.internal.processors.cache.persistence.DataRegion; import org.apache.ignite.internal.processors.cache.persistence.GridCacheOffheapManager; import org.apache.ignite.internal.processors.cache.persistence.freelist.FreeList; import org.apache.ignite.internal.processors.cache.persistence.tree.reuse.ReuseList; import org.apache.ignite.internal.processors.cache.query.continuous.CounterSkipContext; import org.apache.ignite.internal.processors.query.QueryUtils; +import org.apache.ignite.internal.stat.IoStatisticsHolder; +import org.apache.ignite.internal.stat.IoStatisticsHolderNoOp; +import org.apache.ignite.internal.stat.IoStatisticsType; import org.apache.ignite.internal.util.typedef.CI1; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.LT; @@ -56,15 +61,18 @@ import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lang.IgniteFuture; import org.apache.ignite.lang.IgnitePredicate; -import org.apache.ignite.lang.IgniteUuid; import org.apache.ignite.mxbean.CacheGroupMetricsMXBean; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; import static org.apache.ignite.cache.CacheMode.LOCAL; import static org.apache.ignite.cache.CacheMode.REPLICATED; import static org.apache.ignite.cache.CacheRebalanceMode.NONE; +import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_MISSED; +import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_SUPPLIED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_PART_UNLOADED; import static org.apache.ignite.internal.managers.communication.GridIoPolicy.AFFINITY_POOL; +import static org.apache.ignite.internal.stat.IoStatisticsHolderIndex.HASH_PK_IDX_NAME; /** * @@ -77,13 +85,13 @@ public class CacheGroupContext { private final int grpId; /** Node ID cache group was received from. 
*/ - private final UUID rcvdFrom; + private volatile UUID rcvdFrom; /** Flag indicating that this cache group is in a recovery mode due to partitions loss. */ private boolean needsRecovery; /** */ - private final AffinityTopologyVersion locStartVer; + private volatile AffinityTopologyVersion locStartVer; /** */ private final CacheConfiguration ccfg; @@ -92,7 +100,7 @@ public class CacheGroupContext { private final GridCacheSharedContext ctx; /** */ - private final boolean affNode; + private volatile boolean affNode; /** */ private final CacheType cacheType; @@ -106,8 +114,8 @@ public class CacheGroupContext { /** */ private final boolean storeCacheId; - /** */ - private volatile List caches; + /** We modify content under lock, by making defencive copy, field always contains unmodifiable list. */ + private volatile List caches = Collections.unmodifiableList(new ArrayList<>()); /** */ private volatile List contQryCaches; @@ -116,23 +124,23 @@ public class CacheGroupContext { private final IgniteLogger log; /** */ - private GridAffinityAssignmentCache aff; + private volatile GridAffinityAssignmentCache aff; /** */ - private GridDhtPartitionTopologyImpl top; + private volatile GridDhtPartitionTopologyImpl top; /** */ - private IgniteCacheOffheapManager offheapMgr; + private volatile IgniteCacheOffheapManager offheapMgr; /** */ - private GridCachePreloader preldr; - - /** Partition evictor. */ - private GridDhtPartitionsEvictor evictor; + private volatile GridCachePreloader preldr; /** */ private final DataRegion dataRegion; + /** Persistence enabled flag. */ + private final boolean persistenceEnabled; + /** */ private final CacheObjectContext cacheObjCtx; @@ -143,20 +151,35 @@ public class CacheGroupContext { private final ReuseList reuseList; /** */ - private boolean drEnabled; + private volatile boolean drEnabled; /** */ - private boolean qryEnabled; + private volatile boolean qryEnabled; /** MXBean. 
*/ - private CacheGroupMetricsMXBean mxBean; + private final CacheGroupMetricsMXBean mxBean; /** */ - private volatile boolean walEnabled; + private volatile boolean localWalEnabled; + + /** */ + private volatile boolean globalWalEnabled; + + /** Flag indicates that cache group is under recovering and not attached to topology. */ + private final AtomicBoolean recoveryMode; + + /** Statistics holder to track IO operations for PK index pages. */ + private final IoStatisticsHolder statHolderIdx; + + /** Statistics holder to track IO operations for data pages. */ + private final IoStatisticsHolder statHolderData; + + /** */ + private volatile boolean hasAtomicCaches; /** - * @param grpId Group ID. * @param ctx Context. + * @param grpId Group ID. * @param rcvdFrom Node ID cache group was received from. * @param cacheType Cache type. * @param ccfg Cache configuration. @@ -166,6 +189,7 @@ public class CacheGroupContext { * @param freeList Free list. * @param reuseList Reuse list. * @param locStartVer Topology version when group was started on local node. + * @param persistenceEnabled Persistence enabled flag. * @param walEnabled Wal enabled flag. 
*/ CacheGroupContext( @@ -180,7 +204,10 @@ public class CacheGroupContext { FreeList freeList, ReuseList reuseList, AffinityTopologyVersion locStartVer, - boolean walEnabled) { + boolean persistenceEnabled, + boolean walEnabled, + boolean recoveryMode + ) { assert ccfg != null; assert dataRegion != null || !affNode; assert grpId != 0 : "Invalid group ID [cache=" + ccfg.getName() + ", grpName=" + ccfg.getGroupName() + ']'; @@ -196,9 +223,10 @@ public class CacheGroupContext { this.reuseList = reuseList; this.locStartVer = locStartVer; this.cacheType = cacheType; - this.walEnabled = walEnabled; - - persistWalState(walEnabled); + this.globalWalEnabled = walEnabled; + this.persistenceEnabled = persistenceEnabled; + this.localWalEnabled = true; + this.recoveryMode = new AtomicBoolean(recoveryMode); ioPlc = cacheType.ioPolicy(); @@ -208,9 +236,19 @@ public class CacheGroupContext { log = ctx.kernalContext().log(getClass()); - caches = new ArrayList<>(); - mxBean = new CacheGroupMetricsMXBeanImpl(this); + + if (systemCache()) { + statHolderIdx = IoStatisticsHolderNoOp.INSTANCE; + statHolderData = IoStatisticsHolderNoOp.INSTANCE; + } + else { + statHolderIdx = ctx.kernalContext().ioStats().register(IoStatisticsType.HASH_INDEX, + cacheOrGroupName(), HASH_PK_IDX_NAME); + + statHolderData = ctx.kernalContext().ioStats().register(IoStatisticsType.CACHE_GROUP, + cacheOrGroupName()); + } } /** @@ -248,13 +286,6 @@ public GridCachePreloader preloader() { return preldr; } - /** - * @return Partitions evictor. - */ - public GridDhtPartitionsEvictor evictor() { - return evictor; - } - /** * @return IO policy for the given cache group. 
*/ @@ -279,10 +310,9 @@ void onCacheStarted(GridCacheContext cctx) throws IgniteCheckedException { public boolean hasCache(String cacheName) { List caches = this.caches; - for (int i = 0; i < caches.size(); i++) { - if (caches.get(i).name().equals(cacheName)) + for (GridCacheContext cacheContext : caches) + if (cacheContext.name().equals(cacheName)) return true; - } return false; } @@ -294,11 +324,17 @@ private void addCacheContext(GridCacheContext cctx) { assert cacheType.userCache() == cctx.userCache() : cctx.name(); assert grpId == cctx.groupId() : cctx.name(); - ArrayList caches = new ArrayList<>(this.caches); + final boolean add; + + synchronized (this) { + List copy = new ArrayList<>(caches); - assert sharedGroup() || caches.isEmpty(); + assert sharedGroup() || copy.isEmpty(); - boolean add = caches.add(cctx); + add = copy.add(cctx); + + caches = Collections.unmodifiableList(copy); + } assert add : cctx.name(); @@ -308,38 +344,41 @@ private void addCacheContext(GridCacheContext cctx) { if (!drEnabled && cctx.isDrEnabled()) drEnabled = true; - this.caches = caches; - } + if (!hasAtomicCaches) + hasAtomicCaches = cctx.config().getAtomicityMode() == ATOMIC; + } /** * @param cctx Cache context. */ private void removeCacheContext(GridCacheContext cctx) { - ArrayList caches = new ArrayList<>(this.caches); + final List copy; - // It is possible cache was not added in case of errors on cache start. 
- for (Iterator it = caches.iterator(); it.hasNext();) { - GridCacheContext next = it.next(); + synchronized (this) { + copy = new ArrayList<>(caches); - if (next == cctx) { - assert sharedGroup() || caches.size() == 1 : caches.size(); + for (GridCacheContext next : copy) { + if (next == cctx) { + assert sharedGroup() || copy.size() == 1 : copy.size(); - it.remove(); + copy.remove(next); - break; + break; + } } + + caches = Collections.unmodifiableList(copy); } if (QueryUtils.isEnabled(cctx.config())) { boolean qryEnabled = false; - for (int i = 0; i < caches.size(); i++) { - if (QueryUtils.isEnabled(caches.get(i).config())) { + for (GridCacheContext cacheContext : copy) + if (QueryUtils.isEnabled(cacheContext.config())) { qryEnabled = true; break; } - } this.qryEnabled = qryEnabled; } @@ -347,18 +386,15 @@ private void removeCacheContext(GridCacheContext cctx) { if (cctx.isDrEnabled()) { boolean drEnabled = false; - for (int i = 0; i < caches.size(); i++) { - if (caches.get(i).isDrEnabled()) { + for (GridCacheContext cacheContext : copy) + if (QueryUtils.isEnabled(cacheContext.config())) { drEnabled = true; break; } - } this.drEnabled = drEnabled; } - - this.caches = caches; } /** @@ -368,8 +404,8 @@ public GridCacheContext singleCacheContext() { List caches = this.caches; assert !sharedGroup() && caches.size() == 1 : - "stopping=" + ctx.kernalContext().isStopping() + ", groupName=" + ccfg.getGroupName() + - ", caches=" + caches; + "stopping=" + ctx.kernalContext().isStopping() + ", groupName=" + ccfg.getGroupName() + + ", caches=" + caches; return caches.get(0); } @@ -380,11 +416,8 @@ public GridCacheContext singleCacheContext() { public void unwindUndeploys() { List caches = this.caches; - for (int i = 0; i < caches.size(); i++) { - GridCacheContext cctx = caches.get(i); - + for (GridCacheContext cctx : caches) cctx.deploy().unwind(cctx); - } } /** @@ -415,9 +448,7 @@ public void addRebalanceEvent(int part, int type, ClusterNode discoNode, int dis List caches = 
this.caches; - for (int i = 0; i < caches.size(); i++) { - GridCacheContext cctx = caches.get(i); - + for (GridCacheContext cctx : caches) if (!cctx.config().isEventsDisabled() && cctx.recordEvent(type)) { cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), cctx.localNode(), @@ -428,8 +459,8 @@ public void addRebalanceEvent(int part, int type, ClusterNode discoNode, int dis discoType, discoTs)); } - } } + /** * Adds partition unload event. * @@ -442,9 +473,7 @@ public void addUnloadEvent(int part) { List caches = this.caches; - for (int i = 0; i < caches.size(); i++) { - GridCacheContext cctx = caches.get(i); - + for (GridCacheContext cctx : caches) if (!cctx.config().isEventsDisabled()) cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), cctx.localNode(), @@ -454,7 +483,54 @@ public void addUnloadEvent(int part) { null, 0, 0)); - } + } + + /** + * Adds partition supply event. + * + * @param part Partition. + */ + public void addRebalanceSupplyEvent(int part) { + if (!eventRecordable(EVT_CACHE_REBALANCE_PART_SUPPLIED)) + LT.warn(log, "Added event without checking if event is recordable: " + + U.gridEventName(EVT_CACHE_REBALANCE_PART_SUPPLIED)); + + List caches = this.caches; + + for (GridCacheContext cctx : caches) + if (!cctx.config().isEventsDisabled()) + cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), + cctx.localNode(), + "Cache partition supplied event.", + EVT_CACHE_REBALANCE_PART_SUPPLIED, + part, + null, + 0, + 0)); + } + + /** + * Adds partition supply event. + * + * @param part Partition. 
+ */ + public void addRebalanceMissEvent(int part) { + if (!eventRecordable(EVT_CACHE_REBALANCE_PART_MISSED)) + LT.warn(log, "Added event without checking if event is recordable: " + + U.gridEventName(EVT_CACHE_REBALANCE_PART_MISSED)); + + List caches = this.caches; + + for (GridCacheContext cctx : caches) + if (!cctx.config().isEventsDisabled()) + cctx.gridEvents().record(new CacheRebalancingEvent(cctx.name(), + cctx.localNode(), + "Cache partition missed event.", + EVT_CACHE_REBALANCE_PART_MISSED, + part, + null, + 0, + 0)); } /** @@ -481,14 +557,13 @@ public void addCacheEvent( ) { List caches = this.caches; - for (int i = 0; i < caches.size(); i++) { - GridCacheContext cctx = caches.get(i); - + for (GridCacheContext cctx : caches) if (!cctx.config().isEventsDisabled()) cctx.events().addEvent(part, key, evtNodeId, - (IgniteUuid)null, + null, + null, null, type, newVal, @@ -499,7 +574,6 @@ public void addCacheEvent( null, null, keepBinary); - } } /** @@ -509,13 +583,6 @@ public boolean queriesEnabled() { return qryEnabled; } - /** - * @return {@code True} if fast eviction is allowed. - */ - public boolean allowFastEviction() { - return persistenceEnabled() && !queriesEnabled(); - } - /** * @return {@code True} in case replication is enabled. */ @@ -575,6 +642,16 @@ public GridDhtPartitionTopology topology() { return top; } + /** + * @return {@code True} if current thread holds lock on topology. + */ + public boolean isTopologyLocked() { + if (top == null) + return false; + + return top.holdsLock(); + } + /** * @return Offheap manager. 
*/ @@ -691,9 +768,11 @@ public boolean sharedGroup() { * */ public void onKernalStop() { - aff.cancelFutures(new IgniteCheckedException("Failed to wait for topology update, node is stopping.")); + if (!isRecoveryMode()) { + aff.cancelFutures(new IgniteCheckedException("Failed to wait for topology update, node is stopping.")); - preldr.onKernalStop(); + preldr.onKernalStop(); + } offheapMgr.onKernalStop(); } @@ -715,18 +794,100 @@ void stopCache(GridCacheContext cctx, boolean destroy) { * */ void stopGroup() { + offheapMgr.stop(); + + if (isRecoveryMode()) + return; + IgniteCheckedException err = new IgniteCheckedException("Failed to wait for topology update, cache (or node) is stopping."); + ctx.evict().onCacheGroupStopped(this); + aff.cancelFutures(err); preldr.onKernalStop(); - offheapMgr.stop(); - ctx.io().removeCacheGroupHandlers(grpId); } + /** + * Finishes recovery for current cache group. + * Attaches topology version and initializes I/O. + * + * @param startVer Cache group start version. + * @param originalReceivedFrom UUID of node that was first who initiated cache group creating. + * This is needed to decide should node calculate affinity locally or fetch from other nodes. + * @param affinityNode Flag indicates, is local node affinity node or not. This may be calculated only after node joined to topology. + * @throws IgniteCheckedException If failed. + */ + public void finishRecovery( + AffinityTopologyVersion startVer, + UUID originalReceivedFrom, + boolean affinityNode + ) throws IgniteCheckedException { + if (!recoveryMode.compareAndSet(true, false)) + return; + + affNode = affinityNode; + + rcvdFrom = originalReceivedFrom; + + locStartVer = startVer; + + persistGlobalWalState(globalWalEnabled); + + initializeIO(); + + ctx.affinity().onCacheGroupCreated(this); + } + + /** + * @param part Partition to restore state for. + * @param stateId State enum ordinal. + * @return Updated flag. 
+ */ + private boolean updateState(GridDhtLocalPartition part, int stateId) { + if (stateId != -1) { + GridDhtPartitionState state = GridDhtPartitionState.fromOrdinal(stateId); + + assert state != null; + + part.restoreState(state == GridDhtPartitionState.EVICTED ? GridDhtPartitionState.RENTING : state); + + return true; + } + + return false; + } + + /** + * @return {@code True} if current cache group is in recovery mode. + */ + public boolean isRecoveryMode() { + return recoveryMode.get(); + } + + /** + * Initializes affinity and rebalance I/O handlers. + */ + private void initializeIO() throws IgniteCheckedException { + assert !recoveryMode.get() : "Couldn't initialize I/O handlers, recovery mode is on for group " + this; + + if (ccfg.getCacheMode() != LOCAL) { + if (!ctx.kernalContext().clientNode()) { + ctx.io().addCacheGroupHandler(groupId(), GridDhtAffinityAssignmentRequest.class, + (IgniteBiInClosure) this::processAffinityAssignmentRequest); + } + + preldr = new GridDhtPreloader(this); + + preldr.start(); + } + else + preldr = new GridCachePreloaderAdapter(this); + } + /** * @return IDs of caches in this group. */ @@ -735,23 +896,25 @@ public Set cacheIds() { Set ids = U.newHashSet(caches.size()); - for (int i = 0; i < caches.size(); i++) - ids.add(caches.get(i).cacheId()); + for (GridCacheContext cctx : caches) + ids.add(cctx.cacheId()); return ids; } /** * @return Caches in this group. + * + * caches is already Unmodifiable list, so we don't need to explicitly wrap it here. */ public List caches() { - return this.caches; + return caches; } /** * @return {@code True} if group contains caches. 
*/ - boolean hasCaches() { + public boolean hasCaches() { List caches = this.caches; return !caches.isEmpty(); @@ -763,15 +926,11 @@ boolean hasCaches() { public void onPartitionEvicted(int part) { List caches = this.caches; - for (int i = 0; i < caches.size(); i++) { - GridCacheContext cctx = caches.get(i); - + for (GridCacheContext cctx : caches) { if (cctx.isDrEnabled()) cctx.dr().partitionEvicted(part); cctx.continuousQueries().onPartitionEvicted(part); - - cctx.dataStructures().onPartitionEvicted(part); } } @@ -864,65 +1023,53 @@ public void onPartitionCounterUpdate(int cacheId, * @throws IgniteCheckedException If failed. */ public void start() throws IgniteCheckedException { - aff = new GridAffinityAssignmentCache(ctx.kernalContext(), - cacheOrGroupName(), - grpId, - ccfg.getAffinity(), - ccfg.getNodeFilter(), - ccfg.getBackups(), - ccfg.getCacheMode() == LOCAL, - persistenceEnabled()); + GridAffinityAssignmentCache affCache = ctx.affinity().groupAffinity(grpId); - if (ccfg.getCacheMode() != LOCAL) { + if (affCache != null) + aff = affCache; + else + aff = new GridAffinityAssignmentCache(ctx.kernalContext(), + cacheOrGroupName(), + grpId, + ccfg.getAffinity(), + ccfg.getNodeFilter(), + ccfg.getBackups(), + ccfg.getCacheMode() == LOCAL, + persistenceEnabled()); + + if (ccfg.getCacheMode() != LOCAL) top = new GridDhtPartitionTopologyImpl(ctx, this); - if (!ctx.kernalContext().clientNode()) { - ctx.io().addCacheGroupHandler(groupId(), GridDhtAffinityAssignmentRequest.class, - new IgniteBiInClosure() { - @Override public void apply(UUID nodeId, GridDhtAffinityAssignmentRequest msg) { - processAffinityAssignmentRequest(nodeId, msg); - } - }); - } - - preldr = new GridDhtPreloader(this); - - preldr.start(); + try { + offheapMgr = persistenceEnabled + ? 
new GridCacheOffheapManager() + : new IgniteCacheOffheapManagerImpl(); } - else - preldr = new GridCachePreloaderAdapter(this); - - evictor = new GridDhtPartitionsEvictor(this); - - if (persistenceEnabled()) { - try { - offheapMgr = new GridCacheOffheapManager(); - } - catch (Exception e) { - throw new IgniteCheckedException("Failed to initialize offheap manager", e); - } + catch (Exception e) { + throw new IgniteCheckedException("Failed to initialize offheap manager", e); } - else - offheapMgr = new IgniteCacheOffheapManagerImpl(); offheapMgr.start(ctx, this); - ctx.affinity().onCacheGroupCreated(this); + if (!isRecoveryMode()) { + initializeIO(); + + ctx.affinity().onCacheGroupCreated(this); + } } /** * @return Persistence enabled flag. */ public boolean persistenceEnabled() { - return dataRegion != null && dataRegion.config().isPersistenceEnabled(); + return persistenceEnabled; } /** * @param nodeId Node ID. * @param req Request. */ - private void processAffinityAssignmentRequest(final UUID nodeId, - final GridDhtAffinityAssignmentRequest req) { + private void processAffinityAssignmentRequest(UUID nodeId, GridDhtAffinityAssignmentRequest req) { if (log.isDebugEnabled()) log.debug("Processing affinity assignment request [node=" + nodeId + ", req=" + req + ']'); @@ -1021,22 +1168,85 @@ public CacheGroupMetricsMXBean mxBean() { * WAL enabled flag. */ public boolean walEnabled() { - return walEnabled; + return localWalEnabled && globalWalEnabled; + } + + /** + * Local WAL enabled flag. + */ + public boolean localWalEnabled() { + return localWalEnabled; + } + + /** + * @return Global WAL enabled flag. + */ + public boolean globalWalEnabled() { + return globalWalEnabled; + } + + /** + * @param enabled Global WAL enabled flag. 
+ */ + public void globalWalEnabled(boolean enabled) { + if (globalWalEnabled != enabled) { + if (log.isInfoEnabled()) + log.info("Global WAL state for group=" + cacheOrGroupName() + + " changed from " + globalWalEnabled + " to " + enabled); + + persistGlobalWalState(enabled); + + globalWalEnabled = enabled; + } } /** - * @param enabled WAL enabled flag. + * @param enabled Local WAL enabled flag. */ - public void walEnabled(boolean enabled) { - persistWalState(enabled); + public void localWalEnabled(boolean enabled) { + if (localWalEnabled != enabled){ + if (log.isInfoEnabled()) + log.info("Local WAL state for group=" + cacheOrGroupName() + + " changed from " + localWalEnabled + " to " + enabled); + + persistLocalWalState(enabled); - this.walEnabled = enabled; + localWalEnabled = enabled; + } + } + + /** + * @param enabled Enabled flag.. + */ + private void persistGlobalWalState(boolean enabled) { + shared().database().walEnabled(grpId, enabled, false); } /** * @param enabled Enabled flag.. */ - private void persistWalState(boolean enabled) { - shared().database().walEnabled(grpId, enabled); + private void persistLocalWalState(boolean enabled) { + shared().database().walEnabled(grpId, enabled, true); + } + + /** + * @return {@code True} if group has atomic caches. + */ + public boolean hasAtomicCaches() { + return hasAtomicCaches; + } + + /** + * @return Statistics holder to track cache IO operations. + */ + public IoStatisticsHolder statisticsHolderIdx() { + return statHolderIdx; + } + + /** + * @return Statistics holder to track cache IO operations. 
+ */ + public IoStatisticsHolder statisticsHolderData() { + return statHolderData; } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java index 70cdcc735af62..e72de28a05674 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupDescriptor.java @@ -23,6 +23,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.UUID; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; @@ -311,4 +312,22 @@ public boolean persistenceEnabled() { @Override public String toString() { return S.toString(CacheGroupDescriptor.class, this, "cacheName", cacheCfg.getName()); } + + /** {@inheritDoc} */ + @Override public boolean equals(Object o) { + if (this == o) + return true; + + if (o == null || getClass() != o.getClass()) + return false; + + CacheGroupDescriptor that = (CacheGroupDescriptor) o; + + return grpId == that.grpId; + } + + /** {@inheritDoc} */ + @Override public int hashCode() { + return Objects.hash(grpId); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java index 0788ee9126a6a..5ece77f57ba4d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheGroupMetricsMXBeanImpl.java @@ -25,12 +25,18 @@ import java.util.Map; import java.util.Set; import java.util.UUID; +import java.util.concurrent.atomic.LongAdder; +import 
org.apache.ignite.cache.CacheMode; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.internal.processors.affinity.AffinityAssignment; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionFullMap; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionMap; +import org.apache.ignite.internal.processors.cache.persistence.AllocatedPageTracker; +import org.apache.ignite.internal.processors.cache.persistence.DataRegion; +import org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl; import org.apache.ignite.mxbean.CacheGroupMetricsMXBean; /** @@ -40,6 +46,9 @@ public class CacheGroupMetricsMXBeanImpl implements CacheGroupMetricsMXBean { /** Cache group context. */ private final CacheGroupContext ctx; + /** */ + private final GroupAllocationTracker groupPageAllocationTracker; + /** Interface describing a predicate of two integers. */ private interface IntBiPredicate { /** @@ -52,12 +61,48 @@ private interface IntBiPredicate { } /** - * Creates MBean; + * + */ + public static class GroupAllocationTracker implements AllocatedPageTracker { + /** */ + private final LongAdder totalAllocatedPages = new LongAdder(); + + /** */ + private final AllocatedPageTracker delegate; + + /** + * @param delegate Delegate allocation tracker. 
+ */ + public GroupAllocationTracker(AllocatedPageTracker delegate) { + this.delegate = delegate; + } + + /** {@inheritDoc} */ + @Override public void updateTotalAllocatedPages(long delta) { + totalAllocatedPages.add(delta); + + delegate.updateTotalAllocatedPages(delta); + } + } + + /** + * Creates Group metrics MBean. * * @param ctx Cache group context. */ public CacheGroupMetricsMXBeanImpl(CacheGroupContext ctx) { this.ctx = ctx; + + DataRegion region = ctx.dataRegion(); + + // On client node, region is null. + if (region != null) { + DataRegionMetricsImpl dataRegionMetrics = ctx.dataRegion().memoryMetrics(); + + this.groupPageAllocationTracker = dataRegionMetrics.getOrAllocateGroupPageAllocationTracker(ctx.groupId()); + } + else + this.groupPageAllocationTracker = new GroupAllocationTracker(AllocatedPageTracker.NO_OP); } /** {@inheritDoc} */ @@ -174,14 +219,47 @@ private int clusterPartitionsCountByState(GridDhtPartitionState state) { return cnt; } + /** + * Count of partitions with a given state on the local node. + * + * @param state State. 
+ */ + private int localNodePartitionsCountByState(GridDhtPartitionState state) { + int cnt = 0; + + for (GridDhtLocalPartition part : ctx.topology().localPartitions()) { + if (part.state() == state) + cnt++; + } + + return cnt; + } + /** {@inheritDoc} */ @Override public int getLocalNodeOwningPartitionsCount() { - return nodePartitionsCountByState(ctx.shared().localNodeId(), GridDhtPartitionState.OWNING); + return localNodePartitionsCountByState(GridDhtPartitionState.OWNING); } /** {@inheritDoc} */ @Override public int getLocalNodeMovingPartitionsCount() { - return nodePartitionsCountByState(ctx.shared().localNodeId(), GridDhtPartitionState.MOVING); + return localNodePartitionsCountByState(GridDhtPartitionState.MOVING); + } + + /** {@inheritDoc} */ + @Override public int getLocalNodeRentingPartitionsCount() { + return localNodePartitionsCountByState(GridDhtPartitionState.RENTING); + } + + /** {@inheritDoc} */ + @Override public long getLocalNodeRentingEntriesCount() { + long entriesCnt = 0; + + for (GridDhtLocalPartition part : ctx.topology().localPartitions()) { + if (part.state() == GridDhtPartitionState.RENTING) + entriesCnt += part.dataStore().fullSize(); + } + + return entriesCnt; } /** {@inheritDoc} */ @@ -252,4 +330,33 @@ private Map> clusterPartitionsMapByState(GridDhtPartitionSt return assignmentMap; } + + /** {@inheritDoc} */ + @Override public String getType() { + CacheMode type = ctx.config().getCacheMode(); + + return String.valueOf(type); + } + + /** {@inheritDoc} */ + @Override public List getPartitionIds() { + List parts = ctx.topology().localPartitions(); + + List partsRes = new ArrayList<>(parts.size()); + + for (GridDhtLocalPartition part : parts) + partsRes.add(part.id()); + + return partsRes; + } + + /** {@inheritDoc} */ + @Override public long getTotalAllocatedPages() { + return groupPageAllocationTracker.totalAllocatedPages.longValue(); + } + + /** {@inheritDoc} */ + @Override public long getTotalAllocatedSize() { + return 
getTotalAllocatedPages() * ctx.dataRegion().pageMemory().pageSize(); + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java index 17f304eec809d..3f880339eb889 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeDirectResult.java @@ -40,6 +40,10 @@ public class CacheInvokeDirectResult implements Message { /** */ private KeyCacheObject key; + /** */ + @GridToStringInclude + private transient Object unprepareRes; + /** */ @GridToStringInclude private CacheObject res; @@ -68,6 +72,22 @@ public CacheInvokeDirectResult(KeyCacheObject key, CacheObject res) { this.res = res; } + /** + * Constructs CacheInvokeDirectResult with unprepared res, to avoid object marshaling while holding topology locks. + * + * @param key Key. + * @param res Result. + * @return a new instance of CacheInvokeDirectResult. + */ + static CacheInvokeDirectResult lazyResult(KeyCacheObject key, Object res) { + CacheInvokeDirectResult res0 = new CacheInvokeDirectResult(); + + res0.key = key; + res0.unprepareRes = res; + + return res0; + } + /** * @param key Key. * @param err Exception thrown by {@link EntryProcessor#process(MutableEntry, Object...)}. @@ -120,10 +140,27 @@ public void prepareMarshal(GridCacheContext ctx) throws IgniteCheckedException { } } + assert unprepareRes == null : "marshalResult() was not called for the result: " + this; + if (res != null) res.prepareMarshal(ctx.cacheObjectContext()); } + /** + * Converts the entry processor unprepared result to a cache object instance. + * + * @param ctx Cache context. 
+ */ + public void marshalResult(GridCacheContext ctx) { + try { + if (unprepareRes != null) + res = ctx.toCacheObject(unprepareRes); + } + finally { + unprepareRes = null; + } + } + /** * @param ctx Cache context. * @param ldr Class loader. @@ -230,4 +267,4 @@ public void finishUnmarshal(GridCacheContext ctx, ClassLoader ldr) throws Ignite @Override public String toString() { return S.toString(CacheInvokeDirectResult.class, this); } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java index b51c136fa41b7..2e6d64a69cee9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheInvokeResult.java @@ -25,6 +25,9 @@ import javax.cache.processor.EntryProcessorException; import javax.cache.processor.EntryProcessorResult; import javax.cache.processor.MutableEntry; +import org.apache.ignite.IgniteException; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.util.tostring.GridToStringInclude; import org.apache.ignite.internal.util.typedef.internal.S; @@ -96,6 +99,9 @@ public static CacheInvokeResult fromError(Exception err) { /** {@inheritDoc} */ @Override public T get() throws EntryProcessorException { if (err != null) { + if (err instanceof UnregisteredClassException || err instanceof UnregisteredBinaryTypeException) + throw (IgniteException) err; + if (err instanceof EntryProcessorException) throw (EntryProcessorException)err; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java index 
6d2688c948adb..c7a59a4606df7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheJoinNodeDiscoveryData.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.processors.cache; +import java.io.IOException; +import java.io.ObjectInputStream; import java.io.Serializable; import java.util.Map; import org.apache.ignite.internal.util.tostring.GridToStringInclude; @@ -99,30 +101,36 @@ public static class CacheInfo implements Serializable { /** */ @GridToStringInclude - private final StoredCacheData cacheData; + private StoredCacheData cacheData; /** */ @GridToStringInclude - private final CacheType cacheType; + private CacheType cacheType; /** */ @GridToStringInclude - private final boolean sql; + private boolean sql; /** Flags added for future usage. */ - private final long flags; + private long flags; + + /** Statically configured flag */ + private boolean staticallyConfigured; /** * @param cacheData Cache data. * @param cacheType Cache type. * @param sql SQL flag - {@code true} if cache was created with {@code CREATE TABLE}. * @param flags Flags (for future usage). + * @param staticallyConfigured {@code true} if it was configured by static config and {@code false} otherwise. */ - public CacheInfo(StoredCacheData cacheData, CacheType cacheType, boolean sql, long flags) { + public CacheInfo(StoredCacheData cacheData, CacheType cacheType, boolean sql, long flags, + boolean staticallyConfigured) { this.cacheData = cacheData; this.cacheType = cacheType; this.sql = sql; this.flags = flags; + this.staticallyConfigured = staticallyConfigured; } /** @@ -146,6 +154,34 @@ public boolean sql() { return sql; } + /** + * @return {@code true} if it was configured by static config and {@code false} otherwise. 
+ */ + public boolean isStaticallyConfigured() { + return staticallyConfigured; + } + + /** + * @return Long which bits represent some flags. + */ + public long getFlags() { + return flags; + } + + /** + * @param ois ObjectInputStream. + */ + private void readObject(ObjectInputStream ois) + throws IOException, ClassNotFoundException { + ObjectInputStream.GetField gf = ois.readFields(); + + cacheData = (StoredCacheData)gf.get("cacheData", null); + cacheType = (CacheType)gf.get("cacheType", null); + sql = gf.get("sql", false); + flags = gf.get("flags", 0L); + staticallyConfigured = gf.get("staticallyConfigured", true); + } + /** {@inheritDoc} */ @Override public String toString() { return S.toString(CacheInfo.class, this); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java index 438c8c666c01b..212c7a07c4620 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheLocalMetricsMXBeanImpl.java @@ -115,6 +115,11 @@ class CacheLocalMetricsMXBeanImpl implements CacheMetricsMXBean { return cache.metrics0().getSize(); } + /** {@inheritDoc} */ + @Override public long getCacheSize() { + return cache.metrics0().getCacheSize(); + } + /** {@inheritDoc} */ @Override public int getKeySize() { return cache.metrics0().getKeySize(); @@ -365,6 +370,14 @@ class CacheLocalMetricsMXBeanImpl implements CacheMetricsMXBean { return cache.metrics0().getTotalPartitionsCount(); } + @Override public long getRebalancedKeys() { + return cache.metrics0().getRebalancedKeys(); + } + + @Override public long getEstimatedRebalancingKeys() { + return cache.metrics0().getEstimatedRebalancingKeys(); + } + /** {@inheritDoc} */ @Override public int getRebalancingPartitionsCount() { return 
cache.metrics0().getRebalancingPartitionsCount(); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java index 6fae8feff3c6c..8e139b0fb1fa2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsImpl.java @@ -24,9 +24,9 @@ import org.apache.ignite.cache.CachePeekMode; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState; import org.apache.ignite.internal.processors.cache.ratemetrics.HitRateMetrics; import org.apache.ignite.internal.processors.cache.store.GridCacheWriteBehindStore; import org.apache.ignite.internal.util.tostring.GridToStringExclude; @@ -254,6 +254,11 @@ public void delegate(CacheMetricsImpl delegate) { return getEntriesStat().size(); } + /** {@inheritDoc} */ + @Override public long getCacheSize() { + return getEntriesStat().cacheSize(); + } + /** {@inheritDoc} */ @Override public int getKeySize() { return getEntriesStat().keySize(); @@ -754,19 +759,24 @@ public EntriesStatMetrics getEntriesStat() { long offHeapBackupEntriesCnt = 0L; long heapEntriesCnt = 0L; int size = 0; + long sizeLong = 0L; boolean isEmpty; try { - if (cctx.isLocal()) { - if (cctx.cache() != null) { - offHeapEntriesCnt = 
cctx.cache().offHeapEntriesCount(); + final GridCacheAdapter cache = cctx.cache(); - offHeapPrimaryEntriesCnt = offHeapEntriesCnt; - offHeapBackupEntriesCnt = offHeapEntriesCnt; + if (cache != null) { + offHeapEntriesCnt = cache.offHeapEntriesCount(); + + size = cache.localSize(null); + sizeLong = cache.localSizeLong(null); + } - size = cctx.cache().size(); + if (cctx.isLocal()) { + if (cache != null) { + offHeapPrimaryEntriesCnt = offHeapEntriesCnt; - heapEntriesCnt = size; + heapEntriesCnt = cache.sizeLong(); } } else { @@ -775,8 +785,8 @@ public EntriesStatMetrics getEntriesStat() { Set primaries = cctx.affinity().primaryPartitions(cctx.localNodeId(), topVer); Set backups = cctx.affinity().backupPartitions(cctx.localNodeId(), topVer); - if (cctx.isNear() && cctx.cache() != null) - heapEntriesCnt = cctx.cache().nearSize(); + if (cctx.isNear() && cache != null) + heapEntriesCnt = cache.nearSize(); for (GridDhtLocalPartition part : cctx.topology().currentLocalPartitions()) { // Partitions count. 
@@ -789,21 +799,16 @@ public EntriesStatMetrics getEntriesStat() { movingPartCnt++; // Offheap entries count - if (cctx.cache() == null) + if (cache == null) continue; - int cacheSize = part.dataStore().cacheSize(cctx.cacheId()); - - offHeapEntriesCnt += cacheSize; + long cacheSize = part.dataStore().cacheSize(cctx.cacheId()); if (primaries.contains(part.id())) offHeapPrimaryEntriesCnt += cacheSize; - - if (backups.contains(part.id())) + else if (backups.contains(part.id())) offHeapBackupEntriesCnt += cacheSize; - size = (int)offHeapEntriesCnt; - heapEntriesCnt += part.publicSize(cctx.cacheId()); } } @@ -816,6 +821,7 @@ public EntriesStatMetrics getEntriesStat() { offHeapBackupEntriesCnt = -1L; heapEntriesCnt = -1L; size = -1; + sizeLong = -1L; } isEmpty = (offHeapEntriesCnt == 0); @@ -827,6 +833,7 @@ public EntriesStatMetrics getEntriesStat() { stat.offHeapBackupEntriesCount(offHeapBackupEntriesCnt); stat.heapEntriesCount(heapEntriesCnt); stat.size(size); + stat.cacheSize(sizeLong); stat.keySize(size); stat.isEmpty(isEmpty); stat.totalPartitionsCount(owningPartCnt + movingPartCnt); @@ -845,6 +852,16 @@ public EntriesStatMetrics getEntriesStat() { return getEntriesStat().rebalancingPartitionsCount(); } + /** {@inheritDoc} */ + @Override public long getRebalancedKeys() { + return rebalancedKeys.get(); + } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingKeys() { + return estimatedRebalancingKeys.get(); + } + /** {@inheritDoc} */ @Override public long getKeysToRebalanceLeft() { return Math.max(0, estimatedRebalancingKeys.get() - rebalancedKeys.get()); @@ -924,7 +941,10 @@ public void rebalanceClearingPartitions(int partitions) { * First rebalance supply message callback. * @param keysCnt Estimated number of keys. 
*/ - public void onRebalancingKeysCountEstimateReceived(long keysCnt) { + public void onRebalancingKeysCountEstimateReceived(Long keysCnt) { + if (keysCnt == null) + return; + estimatedRebalancingKeys.addAndGet(keysCnt); } @@ -1039,6 +1059,9 @@ public static class EntriesStatMetrics { /** Size. */ private int size; + /** Long size. */ + private long cacheSize; + /** Key size. */ private int keySize; @@ -1157,6 +1180,20 @@ public void keySize(int keySize) { this.keySize = keySize; } + /** + * @return Long size. + */ + public long cacheSize() { + return cacheSize; + } + + /** + * @param cacheSize Size long. + */ + public void cacheSize(long cacheSize) { + this.cacheSize = cacheSize; + } + /** * @return Is empty. */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java index e69372001f215..5ccd05e5fca8b 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshot.java @@ -23,11 +23,14 @@ import java.io.ObjectOutput; import java.util.Collection; import org.apache.ignite.cache.CacheMetrics; +import org.apache.ignite.internal.marshaller.optimized.OptimizedObjectOutputStream; import org.apache.ignite.internal.util.typedef.internal.S; /** * Metrics snapshot. + * @deprecated Replaced by CacheMetricsSnapshotV2 with versioning support. */ +@Deprecated public class CacheMetricsSnapshot implements CacheMetrics, Externalizable { /** */ private static final long serialVersionUID = 0L; @@ -110,6 +113,9 @@ public class CacheMetricsSnapshot implements CacheMetrics, Externalizable { /** Number of non-{@code null} values in the cache. */ private int size; + /** Cache size. */ + private long cacheSize; + /** Number of keys in the cache, possibly with {@code null} values. 
*/ private int keySize; @@ -194,6 +200,12 @@ public class CacheMetricsSnapshot implements CacheMetrics, Externalizable { /** Rebalancing partitions count. */ private int rebalancingPartitionsCnt; + /** Number of already rebalanced keys. */ + private long rebalancedKeys; + + /** Number estimated to rebalance keys. */ + private long estimatedRebalancingKeys; + /** Keys to rebalance left. */ private long keysToRebalanceLeft; @@ -285,8 +297,9 @@ public CacheMetricsSnapshot(CacheMetricsImpl m) { offHeapAllocatedSize = m.getOffHeapAllocatedSize(); - size = entriesStat.size(); + cacheSize = entriesStat.cacheSize(); keySize = entriesStat.keySize(); + size = entriesStat.size(); isEmpty = entriesStat.isEmpty(); dhtEvictQueueCurrSize = m.getDhtEvictQueueCurrentSize(); @@ -327,6 +340,8 @@ public CacheMetricsSnapshot(CacheMetricsImpl m) { totalPartitionsCnt = entriesStat.totalPartitionsCount(); rebalancingPartitionsCnt = entriesStat.rebalancingPartitionsCount(); + rebalancedKeys = m.getRebalancedKeys(); + estimatedRebalancingKeys = m.getEstimatedRebalancingKeys(); keysToRebalanceLeft = m.getKeysToRebalanceLeft(); rebalancingBytesRate = m.getRebalancingBytesRate(); rebalancingKeysRate = m.getRebalancingKeysRate(); @@ -351,6 +366,7 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) writeBehindStoreBatchSize = loc.getWriteBehindStoreBatchSize(); writeBehindBufSize = loc.getWriteBehindBufferSize(); size = loc.getSize(); + cacheSize = loc.getCacheSize(); keySize = loc.getKeySize(); keyType = loc.getKeyType(); @@ -454,6 +470,8 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) else writeBehindErrorRetryCnt = -1; + rebalancedKeys += e.getRebalancedKeys(); + estimatedRebalancingKeys += e.getEstimatedRebalancingKeys(); totalPartitionsCnt += e.getTotalPartitionsCount(); rebalancingPartitionsCnt += e.getRebalancingPartitionsCount(); keysToRebalanceLeft += e.getKeysToRebalanceLeft(); @@ -633,6 +651,11 @@ public CacheMetricsSnapshot(CacheMetrics loc, 
Collection metrics) return size; } + /** {@inheritDoc} */ + @Override public long getCacheSize() { + return cacheSize; + } + /** {@inheritDoc} */ @Override public int getKeySize() { return keySize; @@ -723,6 +746,14 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) return totalPartitionsCnt; } + @Override public long getRebalancedKeys() { + return rebalancedKeys; + } + + @Override public long getEstimatedRebalancingKeys() { + return estimatedRebalancingKeys; + } + /** {@inheritDoc} */ @Override public int getRebalancingPartitionsCount() { return rebalancingPartitionsCnt; @@ -916,6 +947,14 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) out.writeLong(keysToRebalanceLeft); out.writeLong(rebalancingBytesRate); out.writeLong(rebalancingKeysRate); + + if (!(out instanceof OptimizedObjectOutputStream)) { + out.writeLong(rebalancedKeys); + out.writeLong(estimatedRebalancingKeys); + out.writeLong(rebalanceStartTime); + out.writeLong(rebalanceFinishTime); + out.writeLong(rebalanceClearingPartitionsLeft); + } } /** {@inheritDoc} */ @@ -971,5 +1010,13 @@ public CacheMetricsSnapshot(CacheMetrics loc, Collection metrics) keysToRebalanceLeft = in.readLong(); rebalancingBytesRate = in.readLong(); rebalancingKeysRate = in.readLong(); + + if (in.available() >= 40) { + rebalancedKeys = in.readLong(); + estimatedRebalancingKeys = in.readLong(); + rebalanceStartTime = in.readLong(); + rebalanceFinishTime = in.readLong(); + rebalanceClearingPartitionsLeft = in.readLong(); + } } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshotV2.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshotV2.java new file mode 100644 index 0000000000000..23158785aea85 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheMetricsSnapshotV2.java @@ -0,0 +1,1140 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one 
or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.io.IOException; +import java.io.ObjectInput; +import java.io.ObjectOutput; +import java.util.Collection; +import org.apache.ignite.cache.CacheMetrics; +import org.apache.ignite.internal.dto.IgniteDataTransferObject; +import org.apache.ignite.internal.util.typedef.internal.S; + +/** + * Metrics snapshot. + */ +public class CacheMetricsSnapshotV2 extends IgniteDataTransferObject implements CacheMetrics { + /** + * + */ + private static final long serialVersionUID = 0L; + + /** Number of reads. */ + private long reads; + + /** Number of puts. */ + private long puts; + + /** Number of invokes caused updates. */ + private long entryProcessorPuts; + + /** Number of invokes caused no updates. */ + private long entryProcessorReadOnlyInvocations; + + /** + * The mean time to execute cache invokes + */ + private float entryProcessorAverageInvocationTime; + + /** + * The total number of cache invocations. + */ + private long entryProcessorInvocations; + + /** + * The total number of cache invocations, caused removal. + */ + private long entryProcessorRemovals; + + /** + * The total number of invocations on keys, which don't exist in cache. 
+ */ + private long entryProcessorMisses; + + /** + * The total number of invocations on keys, which exist in cache. + */ + private long entryProcessorHits; + + /** + * The percentage of invocations on keys, which don't exist in cache. + */ + private float entryProcessorMissPercentage; + + /** + * The percentage of invocations on keys, which exist in cache. + */ + private float entryProcessorHitPercentage; + + /** + * So far, the maximum time to execute cache invokes. + */ + private float entryProcessorMaxInvocationTime; + + /** + * So far, the minimum time to execute cache invokes. + */ + private float entryProcessorMinInvocationTime; + + /** Number of hits. */ + private long hits; + + /** Number of misses. */ + private long misses; + + /** Number of transaction commits. */ + private long txCommits; + + /** Number of transaction rollbacks. */ + private long txRollbacks; + + /** Number of evictions. */ + private long evicts; + + /** Number of removed entries. */ + private long removes; + + /** Put time taken nanos. */ + private float putAvgTimeNanos; + + /** Get time taken nanos. */ + private float getAvgTimeNanos; + + /** Remove time taken nanos. */ + private float rmvAvgTimeNanos; + + /** Commit transaction time taken nanos. */ + private float commitAvgTimeNanos; + + /** Commit transaction time taken nanos. */ + private float rollbackAvgTimeNanos; + + /** Cache name */ + private String cacheName; + + /** Number of reads from off-heap. */ + private long offHeapGets; + + /** Number of writes to off-heap. */ + private long offHeapPuts; + + /** Number of removed entries from off-heap. */ + private long offHeapRemoves; + + /** Number of evictions from off-heap. */ + private long offHeapEvicts; + + /** Off-heap hits number. */ + private long offHeapHits; + + /** Off-heap misses number. */ + private long offHeapMisses; + + /** Number of entries stored in off-heap memory. */ + private long offHeapEntriesCnt; + + /** Number of entries stored in heap. 
*/ + private long heapEntriesCnt; + + /** Number of primary entries stored in off-heap memory. */ + private long offHeapPrimaryEntriesCnt; + + /** Number of backup entries stored in off-heap memory. */ + private long offHeapBackupEntriesCnt; + + /** Memory size allocated in off-heap. */ + private long offHeapAllocatedSize; + + /** Number of non-{@code null} values in the cache. */ + private int size; + + /** Cache size. */ + private long cacheSize; + + /** Number of keys in the cache, possibly with {@code null} values. */ + private int keySize; + + /** Cache is empty. */ + private boolean isEmpty; + + /** Gets current size of evict queue used to batch up evictions. */ + private int dhtEvictQueueCurrSize; + + /** Transaction per-thread map size. */ + private int txThreadMapSize; + + /** Transaction per-Xid map size. */ + private int txXidMapSize; + + /** Committed transaction queue size. */ + private int txCommitQueueSize; + + /** Prepared transaction queue size. */ + private int txPrepareQueueSize; + + /** Start version counts map size. */ + private int txStartVerCountsSize; + + /** Number of cached committed transaction IDs. */ + private int txCommittedVersionsSize; + + /** Number of cached rolled back transaction IDs. */ + private int txRolledbackVersionsSize; + + /** DHT thread map size. */ + private int txDhtThreadMapSize; + + /** Transaction DHT per-Xid map size. */ + private int txDhtXidMapSize; + + /** Committed DHT transaction queue size. */ + private int txDhtCommitQueueSize; + + /** Prepared DHT transaction queue size. */ + private int txDhtPrepareQueueSize; + + /** DHT start version counts map size. */ + private int txDhtStartVerCountsSize; + + /** Number of cached committed DHT transaction IDs. */ + private int txDhtCommittedVersionsSize; + + /** Number of cached rolled back DHT transaction IDs. */ + private int txDhtRolledbackVersionsSize; + + /** Write-behind is enabled. 
*/ + private boolean isWriteBehindEnabled; + + /** Buffer size that triggers flush procedure. */ + private int writeBehindFlushSize; + + /** Count of worker threads. */ + private int writeBehindFlushThreadCnt; + + /** Flush frequency in milliseconds. */ + private long writeBehindFlushFreq; + + /** Maximum size of batch. */ + private int writeBehindStoreBatchSize; + + /** Count of cache overflow events since start. */ + private int writeBehindTotalCriticalOverflowCnt; + + /** Count of cache overflow events since start. */ + private int writeBehindCriticalOverflowCnt; + + /** Count of entries in store-retry state. */ + private int writeBehindErrorRetryCnt; + + /** Total count of entries in cache store internal buffer. */ + private int writeBehindBufSize; + + /** Total partitions count. */ + private int totalPartitionsCnt; + + /** Rebalancing partitions count. */ + private int rebalancingPartitionsCnt; + + /** Number of already rebalanced keys. */ + private long rebalancedKeys; + + /** Number estimated to rebalance keys. */ + private long estimatedRebalancingKeys; + + /** Keys to rebalance left. */ + private long keysToRebalanceLeft; + + /** Rebalancing keys rate. */ + private long rebalancingKeysRate; + + /** Get rebalancing bytes rate. */ + private long rebalancingBytesRate; + + /** Start rebalance time. */ + private long rebalanceStartTime; + + /** Estimate rebalance finish time. */ + private long rebalanceFinishTime; + + /** The number of clearing partitions need to await before rebalance. 
*/ + private long rebalanceClearingPartitionsLeft; + + /** + * + */ + private String keyType; + + /** + * + */ + private String valType; + + /** + * + */ + private boolean isStoreByVal; + + /** + * + */ + private boolean isStatisticsEnabled; + + /** + * + */ + private boolean isManagementEnabled; + + /** + * + */ + private boolean isReadThrough; + + /** + * + */ + private boolean isWriteThrough; + + /** + * + */ + private boolean isValidForReading; + + /** + * + */ + private boolean isValidForWriting; + + /** + * Default constructor. + */ + public CacheMetricsSnapshotV2() { + // No-op. + } + + /** + * Create snapshot for given metrics. + * + * @param m Cache metrics. + */ + public CacheMetricsSnapshotV2(CacheMetricsImpl m) { + reads = m.getCacheGets(); + puts = m.getCachePuts(); + hits = m.getCacheHits(); + misses = m.getCacheMisses(); + txCommits = m.getCacheTxCommits(); + txRollbacks = m.getCacheTxRollbacks(); + evicts = m.getCacheEvictions(); + removes = m.getCacheRemovals(); + + //TODO GG-14665 + entryProcessorPuts = 0; + entryProcessorReadOnlyInvocations = 0; + entryProcessorInvocations = 0; + entryProcessorRemovals = 0; + entryProcessorMisses = 0; + entryProcessorHits = 0; + entryProcessorMissPercentage = 0; + entryProcessorHitPercentage = 0; + entryProcessorAverageInvocationTime = 0; + entryProcessorMaxInvocationTime = 0; + entryProcessorMinInvocationTime = 0; + + putAvgTimeNanos = m.getAveragePutTime(); + getAvgTimeNanos = m.getAverageGetTime(); + rmvAvgTimeNanos = m.getAverageRemoveTime(); + commitAvgTimeNanos = m.getAverageTxCommitTime(); + rollbackAvgTimeNanos = m.getAverageTxRollbackTime(); + + cacheName = m.name(); + + offHeapGets = m.getOffHeapGets(); + offHeapPuts = m.getOffHeapPuts(); + offHeapRemoves = m.getOffHeapRemovals(); + offHeapEvicts = m.getOffHeapEvictions(); + offHeapHits = m.getOffHeapHits(); + offHeapMisses = m.getOffHeapMisses(); + + CacheMetricsImpl.EntriesStatMetrics entriesStat = m.getEntriesStat(); + + offHeapEntriesCnt = 
entriesStat.offHeapEntriesCount(); + heapEntriesCnt = entriesStat.heapEntriesCount(); + offHeapPrimaryEntriesCnt = entriesStat.offHeapPrimaryEntriesCount(); + offHeapBackupEntriesCnt = entriesStat.offHeapBackupEntriesCount(); + + offHeapAllocatedSize = m.getOffHeapAllocatedSize(); + + cacheSize = entriesStat.cacheSize(); + keySize = entriesStat.keySize(); + size = entriesStat.size(); + isEmpty = entriesStat.isEmpty(); + + dhtEvictQueueCurrSize = m.getDhtEvictQueueCurrentSize(); + txThreadMapSize = m.getTxThreadMapSize(); + txXidMapSize = m.getTxXidMapSize(); + txCommitQueueSize = m.getTxCommitQueueSize(); + txPrepareQueueSize = m.getTxPrepareQueueSize(); + txStartVerCountsSize = m.getTxStartVersionCountsSize(); + txCommittedVersionsSize = m.getTxCommittedVersionsSize(); + txRolledbackVersionsSize = m.getTxRolledbackVersionsSize(); + txDhtThreadMapSize = m.getTxDhtThreadMapSize(); + txDhtXidMapSize = m.getTxDhtXidMapSize(); + txDhtCommitQueueSize = m.getTxDhtCommitQueueSize(); + txDhtPrepareQueueSize = m.getTxDhtPrepareQueueSize(); + txDhtStartVerCountsSize = m.getTxDhtStartVersionCountsSize(); + txDhtCommittedVersionsSize = m.getTxDhtCommittedVersionsSize(); + txDhtRolledbackVersionsSize = m.getTxDhtRolledbackVersionsSize(); + isWriteBehindEnabled = m.isWriteBehindEnabled(); + writeBehindFlushSize = m.getWriteBehindFlushSize(); + writeBehindFlushThreadCnt = m.getWriteBehindFlushThreadCount(); + writeBehindFlushFreq = m.getWriteBehindFlushFrequency(); + writeBehindStoreBatchSize = m.getWriteBehindStoreBatchSize(); + writeBehindTotalCriticalOverflowCnt = m.getWriteBehindTotalCriticalOverflowCount(); + writeBehindCriticalOverflowCnt = m.getWriteBehindCriticalOverflowCount(); + writeBehindErrorRetryCnt = m.getWriteBehindErrorRetryCount(); + writeBehindBufSize = m.getWriteBehindBufferSize(); + + keyType = m.getKeyType(); + valType = m.getValueType(); + isStoreByVal = m.isStoreByValue(); + isStatisticsEnabled = m.isStatisticsEnabled(); + isManagementEnabled = 
m.isManagementEnabled(); + isReadThrough = m.isReadThrough(); + isWriteThrough = m.isWriteThrough(); + isValidForReading = m.isValidForReading(); + isValidForWriting = m.isValidForWriting(); + + totalPartitionsCnt = entriesStat.totalPartitionsCount(); + rebalancingPartitionsCnt = entriesStat.rebalancingPartitionsCount(); + + rebalancedKeys = m.getRebalancedKeys(); + estimatedRebalancingKeys = m.getEstimatedRebalancingKeys(); + keysToRebalanceLeft = m.getKeysToRebalanceLeft(); + rebalancingBytesRate = m.getRebalancingBytesRate(); + rebalancingKeysRate = m.getRebalancingKeysRate(); + rebalanceStartTime = m.rebalancingStartTime(); + rebalanceFinishTime = m.estimateRebalancingFinishTime(); + rebalanceClearingPartitionsLeft = m.getRebalanceClearingPartitionsLeft(); + } + + /** + * Constructs merged cache metrics. + * + * @param loc Metrics for cache on local node. + * @param metrics Metrics for merge. + */ + public CacheMetricsSnapshotV2(CacheMetrics loc, Collection metrics) { + cacheName = loc.name(); + isEmpty = loc.isEmpty(); + isWriteBehindEnabled = loc.isWriteBehindEnabled(); + writeBehindFlushSize = loc.getWriteBehindFlushSize(); + writeBehindFlushThreadCnt = loc.getWriteBehindFlushThreadCount(); + writeBehindFlushFreq = loc.getWriteBehindFlushFrequency(); + writeBehindStoreBatchSize = loc.getWriteBehindStoreBatchSize(); + writeBehindBufSize = loc.getWriteBehindBufferSize(); + size = 0; + cacheSize = 0; + keySize = 0; + + keyType = loc.getKeyType(); + valType = loc.getValueType(); + isStoreByVal = loc.isStoreByValue(); + isStatisticsEnabled = loc.isStatisticsEnabled(); + isManagementEnabled = loc.isManagementEnabled(); + isReadThrough = loc.isReadThrough(); + isWriteThrough = loc.isWriteThrough(); + isValidForReading = loc.isValidForReading(); + isValidForWriting = loc.isValidForWriting(); + + for (CacheMetrics e : metrics) { + reads += e.getCacheGets(); + puts += e.getCachePuts(); + size += e.getSize(); + keySize += e.getKeySize(); + cacheSize += 
e.getCacheSize(); + isEmpty &= e.isEmpty(); + hits += e.getCacheHits(); + misses += e.getCacheMisses(); + txCommits += e.getCacheTxCommits(); + txRollbacks += e.getCacheTxRollbacks(); + evicts += e.getCacheEvictions(); + removes += e.getCacheRemovals(); + + putAvgTimeNanos += e.getAveragePutTime(); + getAvgTimeNanos += e.getAverageGetTime(); + rmvAvgTimeNanos += e.getAverageRemoveTime(); + commitAvgTimeNanos += e.getAverageTxCommitTime(); + rollbackAvgTimeNanos += e.getAverageTxRollbackTime(); + + offHeapGets += e.getOffHeapGets(); + offHeapPuts += e.getOffHeapPuts(); + offHeapRemoves += e.getOffHeapRemovals(); + offHeapEvicts += e.getOffHeapEvictions(); + offHeapHits += e.getOffHeapHits(); + offHeapMisses += e.getOffHeapMisses(); + offHeapEntriesCnt += e.getOffHeapEntriesCount(); + heapEntriesCnt += e.getHeapEntriesCount(); + offHeapPrimaryEntriesCnt += e.getOffHeapPrimaryEntriesCount(); + offHeapBackupEntriesCnt += e.getOffHeapBackupEntriesCount(); + offHeapAllocatedSize += e.getOffHeapAllocatedSize(); + + if (e.getDhtEvictQueueCurrentSize() > -1) + dhtEvictQueueCurrSize += e.getDhtEvictQueueCurrentSize(); + else + dhtEvictQueueCurrSize = -1; + + txThreadMapSize += e.getTxThreadMapSize(); + txXidMapSize += e.getTxXidMapSize(); + txCommitQueueSize += e.getTxCommitQueueSize(); + txPrepareQueueSize += e.getTxPrepareQueueSize(); + txStartVerCountsSize += e.getTxStartVersionCountsSize(); + txCommittedVersionsSize += e.getTxCommittedVersionsSize(); + txRolledbackVersionsSize += e.getTxRolledbackVersionsSize(); + + if (e.getTxDhtThreadMapSize() > -1) + txDhtThreadMapSize += e.getTxDhtThreadMapSize(); + else + txDhtThreadMapSize = -1; + + if (e.getTxDhtXidMapSize() > -1) + txDhtXidMapSize += e.getTxDhtXidMapSize(); + else + txDhtXidMapSize = -1; + + if (e.getTxDhtCommitQueueSize() > -1) + txDhtCommitQueueSize += e.getTxDhtCommitQueueSize(); + else + txDhtCommitQueueSize = -1; + + if (e.getTxDhtPrepareQueueSize() > -1) + txDhtPrepareQueueSize += 
e.getTxDhtPrepareQueueSize(); + else + txDhtPrepareQueueSize = -1; + + if (e.getTxDhtStartVersionCountsSize() > -1) + txDhtStartVerCountsSize += e.getTxDhtStartVersionCountsSize(); + else + txDhtStartVerCountsSize = -1; + + if (e.getTxDhtCommittedVersionsSize() > -1) + txDhtCommittedVersionsSize += e.getTxDhtCommittedVersionsSize(); + else + txDhtCommittedVersionsSize = -1; + + if (e.getTxDhtRolledbackVersionsSize() > -1) + txDhtRolledbackVersionsSize += e.getTxDhtRolledbackVersionsSize(); + else + txDhtRolledbackVersionsSize = -1; + + if (e.getWriteBehindTotalCriticalOverflowCount() > -1) + writeBehindTotalCriticalOverflowCnt += e.getWriteBehindTotalCriticalOverflowCount(); + else + writeBehindTotalCriticalOverflowCnt = -1; + + if (e.getWriteBehindCriticalOverflowCount() > -1) + writeBehindCriticalOverflowCnt += e.getWriteBehindCriticalOverflowCount(); + else + writeBehindCriticalOverflowCnt = -1; + + if (e.getWriteBehindErrorRetryCount() > -1) + writeBehindErrorRetryCnt += e.getWriteBehindErrorRetryCount(); + else + writeBehindErrorRetryCnt = -1; + + rebalancedKeys += e.getRebalancedKeys(); + estimatedRebalancingKeys += e.getEstimatedRebalancingKeys(); + totalPartitionsCnt += e.getTotalPartitionsCount(); + rebalancingPartitionsCnt += e.getRebalancingPartitionsCount(); + keysToRebalanceLeft += e.getKeysToRebalanceLeft(); + rebalancingBytesRate += e.getRebalancingBytesRate(); + rebalancingKeysRate += e.getRebalancingKeysRate(); + } + + int size = metrics.size(); + + if (size > 1) { + putAvgTimeNanos /= size; + getAvgTimeNanos /= size; + rmvAvgTimeNanos /= size; + commitAvgTimeNanos /= size; + rollbackAvgTimeNanos /= size; + } + } + + /** {@inheritDoc} */ + @Override public long getCacheHits() { + return hits; + } + + /** {@inheritDoc} */ + @Override public float getCacheHitPercentage() { + if (hits == 0 || reads == 0) + return 0; + + return (float)hits / reads * 100.0f; + } + + /** {@inheritDoc} */ + @Override public long getCacheMisses() { + return misses; + } + + 
/** {@inheritDoc} */ + @Override public float getCacheMissPercentage() { + if (misses == 0 || reads == 0) + return 0; + + return (float)misses / reads * 100.0f; + } + + /** {@inheritDoc} */ + @Override public long getCacheGets() { + return reads; + } + + /** {@inheritDoc} */ + @Override public long getCachePuts() { + return puts; + } + + /** {@inheritDoc} */ + @Override public long getCacheRemovals() { + return removes; + } + + /** {@inheritDoc} */ + @Override public long getCacheEvictions() { + return evicts; + } + + /** {@inheritDoc} */ + @Override public float getAverageGetTime() { + return getAvgTimeNanos; + } + + /** {@inheritDoc} */ + @Override public float getAveragePutTime() { + return putAvgTimeNanos; + } + + /** {@inheritDoc} */ + @Override public float getAverageRemoveTime() { + return rmvAvgTimeNanos; + } + + /** {@inheritDoc} */ + @Override public float getAverageTxCommitTime() { + return commitAvgTimeNanos; + } + + /** {@inheritDoc} */ + @Override public float getAverageTxRollbackTime() { + return rollbackAvgTimeNanos; + } + + /** {@inheritDoc} */ + @Override public long getCacheTxCommits() { + return txCommits; + } + + /** {@inheritDoc} */ + @Override public long getCacheTxRollbacks() { + return txRollbacks; + } + + /** {@inheritDoc} */ + @Override public String name() { + return cacheName; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapGets() { + return offHeapGets; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapPuts() { + return offHeapPuts; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapRemovals() { + return offHeapRemoves; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapEvictions() { + return offHeapEvicts; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapHits() { + return offHeapHits; + } + + /** {@inheritDoc} */ + @Override public float getOffHeapHitPercentage() { + if (offHeapHits == 0 || offHeapGets == 0) + return 0; + + return (float)offHeapHits / offHeapGets * 
100.0f; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapMisses() { + return offHeapMisses; + } + + /** {@inheritDoc} */ + @Override public float getOffHeapMissPercentage() { + if (offHeapMisses == 0 || offHeapGets == 0) + return 0; + + return (float)offHeapMisses / offHeapGets * 100.0f; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapEntriesCount() { + return offHeapEntriesCnt; + } + + /** {@inheritDoc} */ + @Override public long getHeapEntriesCount() { + return heapEntriesCnt; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapPrimaryEntriesCount() { + return offHeapPrimaryEntriesCnt; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapBackupEntriesCount() { + return offHeapBackupEntriesCnt; + } + + /** {@inheritDoc} */ + @Override public long getOffHeapAllocatedSize() { + return offHeapAllocatedSize; + } + + /** {@inheritDoc} */ + @Override public int getSize() { + return size; + } + + /** {@inheritDoc} */ + @Override public long getCacheSize() { + return cacheSize; + } + + /** {@inheritDoc} */ + @Override public int getKeySize() { + return keySize; + } + + /** {@inheritDoc} */ + @Override public boolean isEmpty() { + return isEmpty; + } + + /** {@inheritDoc} */ + @Override public int getDhtEvictQueueCurrentSize() { + return dhtEvictQueueCurrSize; + } + + /** {@inheritDoc} */ + @Override public int getTxThreadMapSize() { + return txThreadMapSize; + } + + /** {@inheritDoc} */ + @Override public int getTxXidMapSize() { + return txXidMapSize; + } + + /** {@inheritDoc} */ + @Override public int getTxCommitQueueSize() { + return txCommitQueueSize; + } + + /** {@inheritDoc} */ + @Override public int getTxPrepareQueueSize() { + return txPrepareQueueSize; + } + + /** {@inheritDoc} */ + @Override public int getTxStartVersionCountsSize() { + return txStartVerCountsSize; + } + + /** {@inheritDoc} */ + @Override public int getTxCommittedVersionsSize() { + return txCommittedVersionsSize; + } + + /** {@inheritDoc} */ + 
@Override public int getTxRolledbackVersionsSize() { + return txRolledbackVersionsSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtThreadMapSize() { + return txDhtThreadMapSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtXidMapSize() { + return txDhtXidMapSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtCommitQueueSize() { + return txDhtCommitQueueSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtPrepareQueueSize() { + return txDhtPrepareQueueSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtStartVersionCountsSize() { + return txDhtStartVerCountsSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtCommittedVersionsSize() { + return txDhtCommittedVersionsSize; + } + + /** {@inheritDoc} */ + @Override public int getTxDhtRolledbackVersionsSize() { + return txDhtRolledbackVersionsSize; + } + + /** {@inheritDoc} */ + @Override public int getTotalPartitionsCount() { + return totalPartitionsCnt; + } + + /** {@inheritDoc} */ + @Override public long getRebalancedKeys() { + return rebalancedKeys; + } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingKeys() { + return estimatedRebalancingKeys; + } + + /** {@inheritDoc} */ + @Override public int getRebalancingPartitionsCount() { + return rebalancingPartitionsCnt; + } + + /** {@inheritDoc} */ + @Override public long getKeysToRebalanceLeft() { + return keysToRebalanceLeft; + } + + /** {@inheritDoc} */ + @Override public long getRebalancingKeysRate() { + return rebalancingKeysRate; + } + + /** {@inheritDoc} */ + @Override public long getRebalancingBytesRate() { + return rebalancingBytesRate; + } + + /** {@inheritDoc} */ + @Override public long estimateRebalancingFinishTime() { + return rebalanceFinishTime; + } + + /** {@inheritDoc} */ + @Override public long rebalancingStartTime() { + return rebalanceStartTime; + } + + /** {@inheritDoc} */ + @Override public long getEstimatedRebalancingFinishTime() { + return 
rebalanceFinishTime; + } + + /** {@inheritDoc} */ + @Override public long getRebalancingStartTime() { + return rebalanceStartTime; + } + + /** {@inheritDoc} */ + @Override public long getRebalanceClearingPartitionsLeft() { + return rebalanceClearingPartitionsLeft; + } + + /** {@inheritDoc} */ + @Override public boolean isWriteBehindEnabled() { + return isWriteBehindEnabled; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindFlushSize() { + return writeBehindFlushSize; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindFlushThreadCount() { + return writeBehindFlushThreadCnt; + } + + /** {@inheritDoc} */ + @Override public long getWriteBehindFlushFrequency() { + return writeBehindFlushFreq; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindStoreBatchSize() { + return writeBehindStoreBatchSize; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindTotalCriticalOverflowCount() { + return writeBehindTotalCriticalOverflowCnt; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindCriticalOverflowCount() { + return writeBehindCriticalOverflowCnt; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindErrorRetryCount() { + return writeBehindErrorRetryCnt; + } + + /** {@inheritDoc} */ + @Override public int getWriteBehindBufferSize() { + return writeBehindBufSize; + } + + /** {@inheritDoc} */ + @Override public String getKeyType() { + return keyType; + } + + /** {@inheritDoc} */ + @Override public String getValueType() { + return valType; + } + + /** {@inheritDoc} */ + @Override public boolean isStoreByValue() { + return isStoreByVal; + } + + /** {@inheritDoc} */ + @Override public boolean isStatisticsEnabled() { + return isStatisticsEnabled; + } + + /** {@inheritDoc} */ + @Override public boolean isManagementEnabled() { + return isManagementEnabled; + } + + /** {@inheritDoc} */ + @Override public boolean isReadThrough() { + return isReadThrough; + } + + /** {@inheritDoc} */ + @Override public 
boolean isWriteThrough() { + return isWriteThrough; + } + + /** {@inheritDoc} */ + @Override public boolean isValidForReading() { + return isValidForReading; + } + + /** {@inheritDoc} */ + @Override public boolean isValidForWriting() { + return isValidForWriting; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheMetricsSnapshotV2.class, this); + } + + /** {@inheritDoc} */ + @Override public void writeExternalData(ObjectOutput out) throws IOException { + out.writeLong(reads); + out.writeLong(puts); + out.writeLong(hits); + out.writeLong(misses); + out.writeLong(txCommits); + out.writeLong(txRollbacks); + out.writeLong(evicts); + out.writeLong(removes); + + out.writeFloat(putAvgTimeNanos); + out.writeFloat(getAvgTimeNanos); + out.writeFloat(rmvAvgTimeNanos); + out.writeFloat(commitAvgTimeNanos); + out.writeFloat(rollbackAvgTimeNanos); + + out.writeLong(offHeapGets); + out.writeLong(offHeapPuts); + out.writeLong(offHeapRemoves); + out.writeLong(offHeapEvicts); + out.writeLong(offHeapHits); + out.writeLong(offHeapMisses); + out.writeLong(offHeapEntriesCnt); + out.writeLong(heapEntriesCnt); + out.writeLong(offHeapPrimaryEntriesCnt); + out.writeLong(offHeapBackupEntriesCnt); + out.writeLong(offHeapAllocatedSize); + + out.writeInt(dhtEvictQueueCurrSize); + out.writeInt(txThreadMapSize); + out.writeInt(txXidMapSize); + out.writeInt(txCommitQueueSize); + out.writeInt(txPrepareQueueSize); + out.writeInt(txStartVerCountsSize); + out.writeInt(txCommittedVersionsSize); + out.writeInt(txRolledbackVersionsSize); + out.writeInt(txDhtThreadMapSize); + out.writeInt(txDhtXidMapSize); + out.writeInt(txDhtCommitQueueSize); + out.writeInt(txDhtPrepareQueueSize); + out.writeInt(txDhtStartVerCountsSize); + out.writeInt(txDhtCommittedVersionsSize); + out.writeInt(txDhtRolledbackVersionsSize); + out.writeInt(writeBehindTotalCriticalOverflowCnt); + out.writeInt(writeBehindCriticalOverflowCnt); + out.writeInt(writeBehindErrorRetryCnt); + + 
out.writeInt(totalPartitionsCnt); + out.writeInt(rebalancingPartitionsCnt); + out.writeLong(keysToRebalanceLeft); + out.writeLong(rebalancingBytesRate); + out.writeLong(rebalancingKeysRate); + + out.writeLong(rebalancedKeys); + out.writeLong(estimatedRebalancingKeys); + out.writeLong(rebalanceStartTime); + out.writeLong(rebalanceFinishTime); + out.writeLong(rebalanceClearingPartitionsLeft); + + out.writeLong(entryProcessorPuts); + out.writeFloat(entryProcessorAverageInvocationTime); + out.writeLong(entryProcessorInvocations); + out.writeFloat(entryProcessorMaxInvocationTime); + out.writeFloat(entryProcessorMinInvocationTime); + out.writeLong(entryProcessorReadOnlyInvocations); + out.writeFloat(entryProcessorHitPercentage); + out.writeLong(entryProcessorHits); + out.writeLong(entryProcessorMisses); + out.writeFloat(entryProcessorMissPercentage); + out.writeLong(entryProcessorRemovals); + + out.writeLong(cacheSize); + out.writeBoolean(isEmpty); + out.writeInt(size); + out.writeInt(keySize); + } + + /** {@inheritDoc} */ + @Override public void readExternalData(byte protoVer, ObjectInput in) throws IOException, ClassNotFoundException { + reads = in.readLong(); + puts = in.readLong(); + hits = in.readLong(); + misses = in.readLong(); + txCommits = in.readLong(); + txRollbacks = in.readLong(); + evicts = in.readLong(); + removes = in.readLong(); + + putAvgTimeNanos = in.readFloat(); + getAvgTimeNanos = in.readFloat(); + rmvAvgTimeNanos = in.readFloat(); + commitAvgTimeNanos = in.readFloat(); + rollbackAvgTimeNanos = in.readFloat(); + + offHeapGets = in.readLong(); + offHeapPuts = in.readLong(); + offHeapRemoves = in.readLong(); + offHeapEvicts = in.readLong(); + offHeapHits = in.readLong(); + offHeapMisses = in.readLong(); + offHeapEntriesCnt = in.readLong(); + heapEntriesCnt = in.readLong(); + offHeapPrimaryEntriesCnt = in.readLong(); + offHeapBackupEntriesCnt = in.readLong(); + offHeapAllocatedSize = in.readLong(); + + dhtEvictQueueCurrSize = in.readInt(); + 
txThreadMapSize = in.readInt(); + txXidMapSize = in.readInt(); + txCommitQueueSize = in.readInt(); + txPrepareQueueSize = in.readInt(); + txStartVerCountsSize = in.readInt(); + txCommittedVersionsSize = in.readInt(); + txRolledbackVersionsSize = in.readInt(); + txDhtThreadMapSize = in.readInt(); + txDhtXidMapSize = in.readInt(); + txDhtCommitQueueSize = in.readInt(); + txDhtPrepareQueueSize = in.readInt(); + txDhtStartVerCountsSize = in.readInt(); + txDhtCommittedVersionsSize = in.readInt(); + txDhtRolledbackVersionsSize = in.readInt(); + writeBehindTotalCriticalOverflowCnt = in.readInt(); + writeBehindCriticalOverflowCnt = in.readInt(); + writeBehindErrorRetryCnt = in.readInt(); + + totalPartitionsCnt = in.readInt(); + rebalancingPartitionsCnt = in.readInt(); + keysToRebalanceLeft = in.readLong(); + rebalancingBytesRate = in.readLong(); + rebalancingKeysRate = in.readLong(); + + rebalancedKeys = in.readLong(); + estimatedRebalancingKeys = in.readLong(); + rebalanceStartTime = in.readLong(); + rebalanceFinishTime = in.readLong(); + rebalanceClearingPartitionsLeft = in.readLong(); + + entryProcessorPuts = in.readLong(); + entryProcessorAverageInvocationTime = in.readFloat(); + entryProcessorInvocations = in.readLong(); + entryProcessorMaxInvocationTime = in.readFloat(); + entryProcessorMinInvocationTime = in.readFloat(); + entryProcessorReadOnlyInvocations = in.readLong(); + entryProcessorHitPercentage = in.readFloat(); + entryProcessorHits = in.readLong(); + entryProcessorMisses = in.readLong(); + entryProcessorMissPercentage = in.readFloat(); + entryProcessorRemovals = in.readLong(); + + cacheSize = in.readLong(); + isEmpty = in.readBoolean(); + size = in.readInt(); + keySize = in.readInt(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java index 3bc2a6dcb0624..f9f384a7f9702 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObject.java @@ -118,4 +118,4 @@ public interface CacheObject extends Message { * @throws IgniteCheckedException If failed. */ public void prepareMarshal(CacheObjectValueContext ctx) throws IgniteCheckedException; -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java index e2a15ee5d0aed..5987b1317bb14 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectAdapter.java @@ -247,4 +247,4 @@ else if (off >= headSize) return true; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java index 414bebbc2d27d..efe4ab196bc00 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectByteArrayImpl.java @@ -191,4 +191,4 @@ public CacheObjectByteArrayImpl(byte[] val) { public String toString() { return "CacheObjectByteArrayImpl [arrLen=" + (val != null ? 
val.length : 0) + ']'; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java index 2124a97940b9b..b29c19e1e254c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheObjectImpl.java @@ -152,4 +152,4 @@ else if (kernalCtx.config().isPeerClassLoadingEnabled()) @Override public CacheObject prepareForCache(CacheObjectContext ctx) { return this; } -} \ No newline at end of file +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOffheapEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOffheapEvictionManager.java index d737c8bfca328..6813fec446d9e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOffheapEvictionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheOffheapEvictionManager.java @@ -19,7 +19,6 @@ import java.util.Collection; import org.apache.ignite.IgniteCheckedException; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.cache.version.GridCacheVersionManager; @@ -32,11 +31,11 @@ public class CacheOffheapEvictionManager extends GridCacheManagerAdapter implements CacheEvictionManager { /** {@inheritDoc} */ @Override public void touch(IgniteTxEntry txEntry, boolean loc) { - touch(txEntry.cached(), null); + touch(txEntry.cached()); } /** {@inheritDoc} */ - @Override public void touch(GridCacheEntryEx e, AffinityTopologyVersion topVer) { + @Override public void 
touch(GridCacheEntryEx e) { if (e.detached()) return; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsClearMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsClearMessage.java new file mode 100644 index 0000000000000..5b2263081fbcd --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsClearMessage.java @@ -0,0 +1,125 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Collection; +import java.util.UUID; +import org.apache.ignite.internal.managers.discovery.DiscoCache; +import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteUuid; +import org.jetbrains.annotations.Nullable; + +/** + * Cache statistics clear discovery message. 
+ */ +public class CacheStatisticsClearMessage implements DiscoveryCustomMessage { + /** */ + private static final long serialVersionUID = 0L; + + /** Initial message flag mask. */ + private static final byte INITIAL_MSG_MASK = 0x01; + + /** Custom message ID. */ + private final IgniteUuid id = IgniteUuid.randomUuid(); + + /** Request id. */ + private final UUID reqId; + + /** Cache names. */ + private final Collection caches; + + /** Flags. */ + private final byte flags; + + /** + * Constructor for request. + * + * @param caches Collection of cache names. + */ + public CacheStatisticsClearMessage(UUID reqId, Collection caches) { + this.reqId = reqId; + this.caches = caches; + this.flags = INITIAL_MSG_MASK; + } + + /** + * Constructor for response. + * + * @param msg Request message. + */ + private CacheStatisticsClearMessage(CacheStatisticsClearMessage msg) { + this.reqId = msg.reqId; + this.caches = null; + this.flags = 0; + } + + /** {@inheritDoc} */ + @Override public IgniteUuid id() { + return this.id; + } + + /** {@inheritDoc} */ + @Override public boolean isMutable() { + return false; + } + + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + + /** {@inheritDoc} */ + @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, + DiscoCache discoCache) { + throw new UnsupportedOperationException(); + } + + /** {@inheritDoc} */ + @Nullable @Override public DiscoveryCustomMessage ackMessage() { + return initial() ? new CacheStatisticsClearMessage(this) : null; + } + + /** + * @return Cache names. + */ + public Collection caches() { + return this.caches; + } + + /** + * Initial message flag. + */ + public boolean initial() { + return (flags & INITIAL_MSG_MASK) != 0; + } + + /** + * @return Request id. 
+ */ + public UUID requestId() { + return this.reqId; + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(CacheStatisticsClearMessage.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsModeChangeMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsModeChangeMessage.java index 40bcfaf12de16..e33256fbed341 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsModeChangeMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CacheStatisticsModeChangeMessage.java @@ -100,6 +100,11 @@ public CacheStatisticsModeChangeMessage(UUID reqId, Collection caches, b return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java new file mode 100644 index 0000000000000..649b4d1ecd20f --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/CachesRegistry.java @@ -0,0 +1,313 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.IgniteException; +import org.apache.ignite.IgniteLogger; +import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; +import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.util.future.GridFinishedFuture; +import org.apache.ignite.internal.util.lang.GridPlainRunnable; +import org.apache.ignite.internal.util.typedef.internal.CU; +import org.apache.ignite.internal.util.typedef.internal.U; +import org.jetbrains.annotations.Nullable; + +/** + * Class is responsible to hold and persist cache and cache group descriptors. + */ +public class CachesRegistry { + /** Logger. */ + private final IgniteLogger log; + + /** Cache shared context. */ + private final GridCacheSharedContext cctx; + + /** Registered cache groups (updated from exchange thread). */ + private final ConcurrentHashMap registeredGrps = new ConcurrentHashMap<>(); + + /** Registered caches (updated from exchange thread). */ + private final ConcurrentHashMap registeredCaches = new ConcurrentHashMap<>(); + + /** Last registered caches configuration persist future. 
*/ + private volatile IgniteInternalFuture cachesConfPersistFuture; + + /** + * @param cctx Cache shared context. + */ + public CachesRegistry(GridCacheSharedContext cctx) { + assert cctx != null; + + this.cctx = cctx; + this.log = cctx.logger(getClass()); + } + + /** + * Removes currently registered cache groups and caches. + * Adds given cache groups and caches to registry. + * + * @param groupDescriptors Registered groups. + * @param cacheDescriptors Registered caches. + * @return Future that will be completed when all caches configurations will be persisted. + */ + public IgniteInternalFuture init( + Map groupDescriptors, + Map cacheDescriptors + ) { + unregisterAll(); + + return registerAllCachesAndGroups(groupDescriptors.values(), cacheDescriptors.values()); + } + + /** + * Adds cache group to registry. + * + * @param grpDesc Group description. + * @return Previously registered cache group or {@code null} otherwise. + */ + private CacheGroupDescriptor registerGroup(CacheGroupDescriptor grpDesc) { + return registeredGrps.put(grpDesc.groupId(), grpDesc); + } + + /** + * Adds cache to registry. + * + * @param desc Cache description. + * @return Previously registered cache or {@code null} otherwise. + */ + private DynamicCacheDescriptor registerCache(DynamicCacheDescriptor desc) { + return registeredCaches.put(desc.cacheId(), desc); + } + + /** + * Removes cache group from registry. + * + * @param grpId Group id. + * @return Unregistered cache group or {@code null} if group doesn't exist. + */ + public CacheGroupDescriptor unregisterGroup(int grpId) { + return registeredGrps.remove(grpId); + } + + /** + * @return All registered cache groups. + */ + public Map allGroups() { + return Collections.unmodifiableMap(registeredGrps); + } + + /** + * @param grpId Group ID. + * @return Group descriptor. 
+ */ + public CacheGroupDescriptor group(int grpId) { + CacheGroupDescriptor desc = registeredGrps.get(grpId); + + assert desc != null : grpId; + + return desc; + } + + /** + * @param cacheId Cache ID. + * @return Cache descriptor if cache found. + */ + @Nullable public DynamicCacheDescriptor cache(int cacheId) { + return registeredCaches.get(cacheId); + } + + /** + * Removes cache from registry. + * + * @param cacheId Cache id. + * @return Unregistered cache or {@code null} if cache doesn't exist. + */ + @Nullable public DynamicCacheDescriptor unregisterCache(int cacheId) { + return registeredCaches.remove(cacheId); + } + + /** + * @return All registered cache groups. + */ + public Map allCaches() { + return Collections.unmodifiableMap(registeredCaches); + } + + /** + * Adds cache and caches groups that is not registered yet to registry. + * + * @param descs Cache and cache group descriptors. + * @return Future that will be completed when all unregistered cache configurations will be persisted. + */ + public IgniteInternalFuture addUnregistered(Collection descs) { + Collection groups = descs.stream() + .map(DynamicCacheDescriptor::groupDescriptor) + .filter(grpDesc -> !registeredGrps.containsKey(grpDesc.groupId())) + .collect(Collectors.toList()); + + Collection caches = descs.stream() + .filter(cacheDesc -> !registeredCaches.containsKey(cacheDesc.cacheId())) + .collect(Collectors.toList()); + + return registerAllCachesAndGroups(groups, caches); + } + + /** + * Adds caches and cache groups to start from {@code exchActions}. + * Removes caches and caches groups to stop from {@code exchActions}. + * + * @param exchActions Exchange actions. + * @return Future that will be completed when all unregistered cache configurations will be persisted. 
+ */ + public IgniteInternalFuture update(ExchangeActions exchActions) { + for (ExchangeActions.CacheGroupActionData stopAction : exchActions.cacheGroupsToStop()) { + CacheGroupDescriptor rmvd = unregisterGroup(stopAction.descriptor().groupId()); + + assert rmvd != null : stopAction.descriptor().cacheOrGroupName(); + } + + for (ExchangeActions.CacheActionData req : exchActions.cacheStopRequests()) + unregisterCache(req.descriptor().cacheId()); + + Collection grpDescs = exchActions.cacheGroupsToStart().stream() + .map(ExchangeActions.CacheGroupActionData::descriptor) + .collect(Collectors.toList()); + + Collection cacheDescs = exchActions.cacheStartRequests().stream() + .map(ExchangeActions.CacheActionData::descriptor) + .collect(Collectors.toList()); + + return registerAllCachesAndGroups(grpDescs, cacheDescs); + } + + /** + * + */ + public void unregisterAll() { + registeredGrps.clear(); + + registeredCaches.clear(); + } + + /** + * Awaits last registered caches configurations persist future. + */ + private void waitLastRegistration() { + IgniteInternalFuture currentFut = cachesConfPersistFuture; + + if (currentFut != null && !currentFut.isDone()) { + try { + currentFut.get(); + } + catch (IgniteCheckedException e) { + throw new IgniteException("Failed to wait for last registered caches registration future", e); + } + + if (log.isInfoEnabled()) + log.info("Successfully awaited for last registered caches registration future"); + } + } + + /** + * Registers caches and groups. + * Persists caches configurations on disk if needed. + * + * @param groupDescriptors Cache group descriptors. + * @param cacheDescriptors Cache descriptors. + * @return Future that will be completed when all unregistered cache configurations will be persisted. 
+ */ + private IgniteInternalFuture registerAllCachesAndGroups( + Collection groupDescriptors, + Collection cacheDescriptors + ) { + waitLastRegistration(); + + for (CacheGroupDescriptor grpDesc : groupDescriptors) + registerGroup(grpDesc); + + for (DynamicCacheDescriptor cacheDesc : cacheDescriptors) + registerCache(cacheDesc); + + List cachesToPersist = cacheDescriptors.stream() + .filter(cacheDesc -> shouldPersist(cacheDesc.cacheConfiguration())) + .collect(Collectors.toList()); + + if (cachesToPersist.isEmpty()) + return cachesConfPersistFuture = new GridFinishedFuture<>(); + + List cacheConfigsToPersist = cacheDescriptors.stream() + .map(DynamicCacheDescriptor::toStoredData) + .collect(Collectors.toList()); + + return cachesConfPersistFuture = persistCacheConfigurations(cacheConfigsToPersist); + } + + /** + * Checks whether given cache configuration should be persisted. + * + * @param cacheCfg Cache config. + * @return {@code True} if cache configuration should be persisted, {@code false} in other case. + */ + private boolean shouldPersist(CacheConfiguration cacheCfg) { + return cctx.pageStore() != null && + CU.isPersistentCache(cacheCfg, cctx.gridConfig().getDataStorageConfiguration()) && + !cctx.kernalContext().clientNode(); + } + + /** + * Persists cache configurations. + * + * @param cacheConfigsToPersist Cache configurations to persist. + * @return Future that will be completed when all cache configurations will be persisted to cache work directory. + */ + private IgniteInternalFuture persistCacheConfigurations(List cacheConfigsToPersist) { + // Pre-create cache work directories if they don't exist. 
+ for (StoredCacheData data : cacheConfigsToPersist) { + try { + cctx.pageStore().checkAndInitCacheWorkDir(data.config()); + } + catch (IgniteCheckedException e) { + if (!cctx.kernalContext().isStopping()) { + cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + U.error(log, "Failed to initialize cache work directory for " + data.config(), e); + } + } + } + + return cctx.kernalContext().closure().runLocalSafe(new GridPlainRunnable() { + @Override public void run() { + try { + for (StoredCacheData data : cacheConfigsToPersist) + cctx.cache().saveCacheConfiguration(data, false); + } + catch (IgniteCheckedException e) { + U.error(log, "Error while saving cache configurations on disk", e); + } + } + }); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java index e35d80e5c1c6f..ae76c950421db 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDiscoveryMessage.java @@ -172,6 +172,11 @@ public void updateTimeoutObject(ClientCacheUpdateTimeout updateTimeoutObj) { return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java index 6ed3ecc505f25..d723eee69b438 100644 --- 
a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClientCacheChangeDummyDiscoveryMessage.java @@ -52,9 +52,11 @@ public class ClientCacheChangeDummyDiscoveryMessage implements DiscoveryCustomMe * @param startReqs Caches start requests. * @param cachesToClose Cache to close. */ - public ClientCacheChangeDummyDiscoveryMessage(UUID reqId, + public ClientCacheChangeDummyDiscoveryMessage( + UUID reqId, @Nullable Map startReqs, - @Nullable Set cachesToClose) { + @Nullable Set cachesToClose + ) { assert reqId != null; assert startReqs != null ^ cachesToClose != null; @@ -104,6 +106,11 @@ Set cachesToClose() { throw new UnsupportedOperationException(); } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Nullable @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java index 2b2fb559c182e..a9d68aad0574e 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ClusterCachesInfo.java @@ -24,6 +24,7 @@ import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -32,34 +33,41 @@ import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; +import java.util.stream.Collectors; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteLogger; import 
org.apache.ignite.cache.CacheExistsException; +import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.cluster.ClusterNode; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.DataStorageConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.GridCachePluginContext; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.IgniteInternalFuture; +import org.apache.ignite.internal.IgniteNodeAttributes; +import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateFinishMessage; import org.apache.ignite.internal.processors.cluster.ChangeGlobalStateMessage; import org.apache.ignite.internal.processors.cluster.DiscoveryDataClusterState; import org.apache.ignite.internal.processors.query.QuerySchema; +import org.apache.ignite.internal.processors.query.QuerySchemaPatch; import org.apache.ignite.internal.processors.query.QueryUtils; import org.apache.ignite.internal.processors.query.schema.SchemaOperationException; -import org.apache.ignite.internal.util.GridConcurrentHashSet; +import org.apache.ignite.internal.util.lang.GridFunc; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.internal.util.typedef.T2; import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteInClosure; +import org.apache.ignite.lang.IgniteProductVersion; import org.apache.ignite.lang.IgniteUuid; +import org.apache.ignite.marshaller.jdk.JdkMarshaller; import org.apache.ignite.plugin.CachePluginContext; import org.apache.ignite.plugin.CachePluginProvider; import org.apache.ignite.plugin.PluginProvider; import org.apache.ignite.spi.discovery.DiscoveryDataBag; -import 
org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.cache.CacheMode.LOCAL; @@ -71,6 +79,12 @@ * Logic related to cache discovery data processing. */ class ClusterCachesInfo { + /** Representation of null for restarting caches map */ + private static final IgniteUuid NULL_OBJECT = new IgniteUuid(); + + /** Version since which merge of config is supports. */ + private static final IgniteProductVersion V_MERGE_CONFIG_SINCE = IgniteProductVersion.fromString("2.5.0"); + /** */ private final GridKernalContext ctx; @@ -83,8 +97,8 @@ class ClusterCachesInfo { /** Cache templates. */ private final ConcurrentMap registeredTemplates = new ConcurrentHashMap<>(); - /** Caches currently being restarted. */ - private final Collection restartingCaches = new GridConcurrentHashSet<>(); + /** Caches currently being restarted (with restarter id). */ + private final ConcurrentHashMap restartingCaches = new ConcurrentHashMap<>(); /** */ private final IgniteLogger log; @@ -119,6 +133,84 @@ public ClusterCachesInfo(GridKernalContext ctx) { log = ctx.log(getClass()); } + /** + * Filters all dynamic cache descriptors and groups that were not presented on node start + * and were received with grid discovery data. + * + * @param localCachesOnStart Caches which were already presented on node start. + */ + public void filterDynamicCacheDescriptors(Set localCachesOnStart) { + if (ctx.isDaemon()) + return; + + filterRegisteredCachesAndCacheGroups(localCachesOnStart); + + List> locJoinStartCaches = locJoinCachesCtx.caches(); + + filterLocalJoinStartCaches(locJoinStartCaches); + + locJoinCachesCtx = new LocalJoinCachesContext( + locJoinStartCaches, + registeredCacheGrps, + registeredCaches); + } + + /** + * Filters from registered caches all caches that are not presented in node's local configuration. + * + * Then filters from registered cache groups all groups that became empty after registered caches were filtered. 
+ * + * @param locCaches Caches from local node configuration (static configuration and persistent caches). + */ + private void filterRegisteredCachesAndCacheGroups(Set locCaches) { + //filter registered caches + Iterator> cachesIter = registeredCaches.entrySet().iterator(); + + while (cachesIter.hasNext()) { + Map.Entry e = cachesIter.next(); + + if (!locCaches.contains(e.getKey())) { + cachesIter.remove(); + + ctx.discovery().removeCacheFilter(e.getKey()); + } + } + + //filter registered cache groups + Iterator> grpsIter = registeredCacheGrps.entrySet().iterator(); + + while (grpsIter.hasNext()) { + Map.Entry e = grpsIter.next(); + + boolean removeGrp = true; + + for (DynamicCacheDescriptor cacheDescr : registeredCaches.values()) { + if (cacheDescr.groupId() == e.getKey()) { + removeGrp = false; + + break; + } + } + + if (removeGrp) { + grpsIter.remove(); + + ctx.discovery().removeCacheGroup(e.getValue()); + } + } + } + + /** + * Filters from local join context cache descriptors that should be started on join. + * + * @param locJoinStartCaches Collection to filter. + */ + private void filterLocalJoinStartCaches( + List> locJoinStartCaches) { + + locJoinStartCaches.removeIf(next -> !registeredCaches.containsKey(next.getKey().cacheName())); + } + /** * @param joinDiscoData Information about configured caches and templates. * @throws IgniteCheckedException If configuration validation failed. @@ -385,6 +477,31 @@ public void onClientCacheChange(ClientCacheChangeDiscoveryMessage msg, ClusterNo } } + /** + * Creates exchanges actions. Forms a list of caches and cache groups to be stopped + * due to dynamic cache start failure. + * + * @param failMsg Dynamic change request fail message. + * @param topVer Topology version. 
+ */ + public void onCacheChangeRequested(DynamicCacheChangeFailureMessage failMsg, AffinityTopologyVersion topVer) { + ExchangeActions exchangeActions = new ExchangeActions(); + + List requests = new ArrayList<>(failMsg.cacheNames().size()); + + for (String cacheName : failMsg.cacheNames()) { + DynamicCacheDescriptor cacheDescr = registeredCaches.get(cacheName); + + assert cacheDescr != null : "Dynamic cache descriptor is missing [cacheName=" + cacheName + "]"; + + requests.add(DynamicCacheChangeRequest.stopRequest(ctx, cacheName, cacheDescr.sql(), true)); + } + + processCacheChangeRequests(exchangeActions, requests, topVer, false); + + failMsg.exchangeActions(exchangeActions); + } + /** * @param batch Cache change request. * @param topVer Topology version. @@ -437,303 +554,398 @@ private CacheChangeProcessResult processCacheChangeRequests( ExchangeActions exchangeActions, Collection reqs, AffinityTopologyVersion topVer, - boolean persistedCfgs) { + boolean persistedCfgs + ) { CacheChangeProcessResult res = new CacheChangeProcessResult(); final List> reqsToComplete = new ArrayList<>(); - for (DynamicCacheChangeRequest req : reqs) { - if (req.template()) { - CacheConfiguration ccfg = req.startCacheConfiguration(); + for (DynamicCacheChangeRequest req : reqs) + processCacheChangeRequest0(req, exchangeActions, topVer, persistedCfgs, res, reqsToComplete); + + if (!F.isEmpty(res.addedDescs)) { + AffinityTopologyVersion startTopVer = res.needExchange ? 
topVer.nextMinorVersion() : topVer; - assert ccfg != null : req; + for (DynamicCacheDescriptor desc : res.addedDescs) { + assert desc.template() || res.needExchange; - DynamicCacheDescriptor desc = registeredTemplates.get(req.cacheName()); + desc.startTopologyVersion(startTopVer); + } + } - if (desc == null) { - DynamicCacheDescriptor templateDesc = new DynamicCacheDescriptor(ctx, - ccfg, - req.cacheType(), - null, - true, - req.initiatingNodeId(), - false, - false, - req.deploymentId(), - req.schema()); + if (!F.isEmpty(reqsToComplete)) { + ctx.closure().callLocalSafe(new Callable() { + @Override public Void call() throws Exception { + for (T2 t : reqsToComplete) { + final DynamicCacheChangeRequest req = t.get1(); + AffinityTopologyVersion waitTopVer = t.get2(); - DynamicCacheDescriptor old = registeredTemplates().put(ccfg.getName(), templateDesc); + IgniteInternalFuture fut = waitTopVer != null ? + ctx.cache().context().exchange().affinityReadyFuture(waitTopVer) : null; - assert old == null; + if (fut == null || fut.isDone()) + ctx.cache().completeCacheStartFuture(req, false, null); + else { + fut.listen(new IgniteInClosure>() { + @Override public void apply(IgniteInternalFuture fut) { + ctx.cache().completeCacheStartFuture(req, false, null); + } + }); + } + } - res.addedDescs.add(templateDesc); + return null; } + }); + } - if (!persistedCfgs) - ctx.cache().completeTemplateAddFuture(ccfg.getName(), req.deploymentId()); + return res; + } - continue; - } + /** + * @param req Cache change request. + * @param exchangeActions Exchange actions to update. + * @param topVer Topology version. + * @param persistedCfgs {@code True} if process start of persisted caches during cluster activation. + * @param res Accumulator for cache change process results. 
+ * @param reqsToComplete Accumulator for cache change requests which should be completed after + * ({@link org.apache.ignite.internal.processors.cache.GridCacheProcessor#pendingFuts} + */ + private void processCacheChangeRequest0( + DynamicCacheChangeRequest req, + ExchangeActions exchangeActions, + AffinityTopologyVersion topVer, + boolean persistedCfgs, + CacheChangeProcessResult res, + List> reqsToComplete + ) { + String cacheName = req.cacheName(); + + if (req.template()) { + processTemplateAddRequest(persistedCfgs, res, req); - assert !req.clientStartOnly() : req; + return; + } - DynamicCacheDescriptor desc = registeredCaches.get(req.cacheName()); + assert !req.clientStartOnly() : req; - boolean needExchange = false; + DynamicCacheDescriptor desc = registeredCaches.get(cacheName); - boolean clientCacheStart = false; + boolean needExchange = false; - AffinityTopologyVersion waitTopVer = null; + boolean clientCacheStart = false; - if (req.start()) { - // Starting a new cache. - if (desc == null) { - String conflictErr = checkCacheConflict(req.startCacheConfiguration()); + AffinityTopologyVersion waitTopVer = null; - if (conflictErr != null) { - U.warn(log, "Ignore cache start request. " + conflictErr); + if (req.start()) { + boolean proceedFuther = true; - IgniteCheckedException err = new IgniteCheckedException("Failed to start " + - "cache. 
" + conflictErr); + if (restartingCaches.containsKey(cacheName) && + ((req.restartId() == null && restartingCaches.get(cacheName) != NULL_OBJECT) + || (req.restartId() != null &&!req.restartId().equals(restartingCaches.get(cacheName))))) { - if (persistedCfgs) - res.errs.add(err); - else - ctx.cache().completeCacheStartFuture(req, false, err); + if (req.failIfExists()) { + ctx.cache().completeCacheStartFuture(req, false, + new CacheExistsException("Failed to start cache (a cache is restarting): " + cacheName)); + } - continue; - } + proceedFuther = false; + } - if (req.clientStartOnly()) { - assert !persistedCfgs; + if (proceedFuther) { + if (desc == null) { /* Starting a new cache.*/ + if (!processStartNewCacheRequest(exchangeActions, topVer, persistedCfgs, res, req, cacheName)) + return; - ctx.cache().completeCacheStartFuture(req, false, new IgniteCheckedException("Failed to start " + - "client cache (a cache with the given name is not started): " + req.cacheName())); - } - else { - SchemaOperationException err = QueryUtils.checkQueryEntityConflicts( - req.startCacheConfiguration(), registeredCaches.values()); + needExchange = true; + } + else { + clientCacheStart = processStartAlreadyStartedCacheRequest(topVer, persistedCfgs, req, cacheName, desc); + + if (!clientCacheStart) { + if (desc.clientCacheStartVersion() != null) + waitTopVer = desc.clientCacheStartVersion(); + else { + AffinityTopologyVersion nodeStartVer = + new AffinityTopologyVersion(ctx.discovery().localNode().order(), 0); - if (err != null) { - if (persistedCfgs) - res.errs.add(err); + if (desc.startTopologyVersion() != null) + waitTopVer = desc.startTopologyVersion(); else - ctx.cache().completeCacheStartFuture(req, false, err); + waitTopVer = desc.receivedFromStartVersion(); - continue; + if (waitTopVer == null || nodeStartVer.compareTo(waitTopVer) > 0) + waitTopVer = nodeStartVer; } + } + } + } + } + else if (req.resetLostPartitions()) { + if (desc != null) { + needExchange = true; - 
CacheConfiguration ccfg = req.startCacheConfiguration(); + exchangeActions.addCacheToResetLostPartitions(req, desc); + } + } + else if (req.stop()) { + if (desc != null) { + if (req.sql() && !desc.sql()) { + ctx.cache().completeCacheStartFuture(req, false, + new IgniteCheckedException("Only cache created with CREATE TABLE may be removed with " + + "DROP TABLE [cacheName=" + cacheName + ']')); - assert req.cacheType() != null : req; - assert F.eq(ccfg.getName(), req.cacheName()) : req; + return; + } - int cacheId = CU.cacheId(req.cacheName()); + processStopCacheRequest(exchangeActions, req, cacheName, desc); - CacheGroupDescriptor grpDesc = registerCacheGroup(exchangeActions, - topVer, - ccfg, - cacheId, - req.initiatingNodeId(), - req.deploymentId()); + needExchange = true; + } + } + else + assert false : req; - DynamicCacheDescriptor startDesc = new DynamicCacheDescriptor(ctx, - ccfg, - req.cacheType(), - grpDesc, - false, - req.initiatingNodeId(), - false, - req.sql(), - req.deploymentId(), - req.schema()); + if (!needExchange) { + if (!clientCacheStart && ctx.localNodeId().equals(req.initiatingNodeId())) + reqsToComplete.add(new T2<>(req, waitTopVer)); + } + else + res.needExchange = true; + } - DynamicCacheDescriptor old = registeredCaches.put(ccfg.getName(), startDesc); + /** + * @param req Cache change request. + * @param exchangeActions Exchange actions to update. + * @param cacheName Cache name. + * @param desc Dynamic cache descriptor. + */ + private void processStopCacheRequest( + ExchangeActions exchangeActions, + DynamicCacheChangeRequest req, + String cacheName, + DynamicCacheDescriptor desc + ) { + DynamicCacheDescriptor old = registeredCaches.remove(cacheName); - restartingCaches.remove(ccfg.getName()); + if (req.restart()) { + IgniteUuid restartId = req.restartId(); - assert old == null; + restartingCaches.put(cacheName, restartId == null ? 
NULL_OBJECT : restartId); + } - ctx.discovery().setCacheFilter( - startDesc.cacheId(), - grpDesc.groupId(), - ccfg.getName(), - ccfg.getNearConfiguration() != null); + assert old != null && old == desc : "Dynamic cache map was concurrently modified [req=" + req + ']'; - if (!persistedCfgs) { - ctx.discovery().addClientNode(req.cacheName(), - req.initiatingNodeId(), - req.nearCacheConfiguration() != null); - } + ctx.discovery().removeCacheFilter(cacheName); - res.addedDescs.add(startDesc); + exchangeActions.addCacheToStop(req, desc); - exchangeActions.addCacheToStart(req, startDesc); + CacheGroupDescriptor grpDesc = registeredCacheGrps.get(desc.groupId()); - needExchange = true; - } - } - else { - assert !persistedCfgs; - assert req.initiatingNodeId() != null : req; + assert grpDesc != null && grpDesc.groupId() == desc.groupId() : desc; - if (req.failIfExists()) { - ctx.cache().completeCacheStartFuture(req, false, - new CacheExistsException("Failed to start cache " + - "(a cache with the same name is already started): " + req.cacheName())); - } - else { - // Cache already exists, it is possible client cache is needed. 
- ClusterNode node = ctx.discovery().node(req.initiatingNodeId()); + grpDesc.onCacheStopped(desc.cacheName(), desc.cacheId()); - boolean clientReq = node != null && - !ctx.discovery().cacheAffinityNode(node, req.cacheName()); + if (!grpDesc.hasCaches()) { + registeredCacheGrps.remove(grpDesc.groupId()); - if (clientReq) { - ctx.discovery().addClientNode(req.cacheName(), - req.initiatingNodeId(), - req.nearCacheConfiguration() != null); + ctx.discovery().removeCacheGroup(grpDesc); - if (node.id().equals(req.initiatingNodeId())) { - desc.clientCacheStartVersion(topVer); + exchangeActions.addCacheGroupToStop(grpDesc, req.destroy()); - clientCacheStart = true; + assert exchangeActions.checkStopRequestConsistency(grpDesc.groupId()); - ctx.discovery().clientCacheStartEvent(req.requestId(), F.asMap(req.cacheName(), req), null); - } - } - } + // If all caches in group will be destroyed it is not necessary to destroy single cache + // because group will be stopped anyway. + if (req.destroy()) { + for (ExchangeActions.CacheActionData action : exchangeActions.cacheStopRequests()) { + if (action.descriptor().groupId() == grpDesc.groupId()) + action.request().destroy(false); } + } + } + } - if (!needExchange && !clientCacheStart && desc != null) { - if (desc.clientCacheStartVersion() != null) - waitTopVer = desc.clientCacheStartVersion(); - else { - AffinityTopologyVersion nodeStartVer = - new AffinityTopologyVersion(ctx.discovery().localNode().order(), 0); - - if (desc.startTopologyVersion() != null) - waitTopVer = desc.startTopologyVersion(); - else - waitTopVer = desc.receivedFromStartVersion(); + /** + * @param persistedCfgs {@code True} if process start of persisted caches during cluster activation. + * @param res Accumulator for cache change process results. + * @param req Dynamic cache change request. 
+ */ + private void processTemplateAddRequest( + boolean persistedCfgs, + CacheChangeProcessResult res, + DynamicCacheChangeRequest req + ) { + CacheConfiguration ccfg = req.startCacheConfiguration(); - if (waitTopVer == null || nodeStartVer.compareTo(waitTopVer) > 0) - waitTopVer = nodeStartVer; - } - } - } - else if (req.resetLostPartitions()) { - if (desc != null) { - needExchange = true; + assert ccfg != null : req; - exchangeActions.addCacheToResetLostPartitions(req, desc); - } - } - else if (req.stop()) { - if (desc != null) { - if (req.sql() && !desc.sql()) { - ctx.cache().completeCacheStartFuture(req, false, - new IgniteCheckedException("Only cache created with CREATE TABLE may be removed with " + - "DROP TABLE [cacheName=" + req.cacheName() + ']')); - - continue; - } + DynamicCacheDescriptor desc = registeredTemplates.get(req.cacheName()); - if (!req.sql() && desc.sql()) { - ctx.cache().completeCacheStartFuture(req, false, - new IgniteCheckedException("Only cache created with cache API may be removed with " + - "direct call to destroyCache [cacheName=" + req.cacheName() + ']')); + if (desc == null) { + DynamicCacheDescriptor templateDesc = new DynamicCacheDescriptor(ctx, + ccfg, + req.cacheType(), + null, + true, + req.initiatingNodeId(), + false, + false, + req.deploymentId(), + req.schema()); - continue; - } + DynamicCacheDescriptor old = registeredTemplates().put(ccfg.getName(), templateDesc); - DynamicCacheDescriptor old = registeredCaches.remove(req.cacheName()); + assert old == null; - if (req.restart()) - restartingCaches.add(req.cacheName()); + res.addedDescs.add(templateDesc); + } - assert old != null && old == desc : "Dynamic cache map was concurrently modified [req=" + req + ']'; + if (!persistedCfgs) + ctx.cache().completeTemplateAddFuture(ccfg.getName(), req.deploymentId()); + } - ctx.discovery().removeCacheFilter(req.cacheName()); + /** + * @param topVer Topology version. 
+ * @param persistedCfgs {@code True} if process start of persisted caches during cluster activation. + * @param req Cache change request. + * @param cacheName Cache name. + * @param desc Dynamic cache descriptor. + * @return True if it is needed to start client cache. + */ + private boolean processStartAlreadyStartedCacheRequest( + AffinityTopologyVersion topVer, + boolean persistedCfgs, + DynamicCacheChangeRequest req, + String cacheName, + DynamicCacheDescriptor desc + ) { + assert !persistedCfgs; + assert req.initiatingNodeId() != null : req; + + if (req.failIfExists()) { + ctx.cache().completeCacheStartFuture(req, false, + new CacheExistsException("Failed to start cache " + + "(a cache with the same name is already started): " + cacheName)); + } + else { + // Cache already exists, it is possible client cache is needed. + ClusterNode node = ctx.discovery().node(req.initiatingNodeId()); - needExchange = true; + boolean clientReq = node != null && + !ctx.discovery().cacheAffinityNode(node, cacheName); - exchangeActions.addCacheToStop(req, desc); + if (clientReq) { + ctx.discovery().addClientNode(cacheName, + req.initiatingNodeId(), + req.nearCacheConfiguration() != null); - CacheGroupDescriptor grpDesc = registeredCacheGrps.get(desc.groupId()); + if (node.id().equals(req.initiatingNodeId())) { + desc.clientCacheStartVersion(topVer); - assert grpDesc != null && grpDesc.groupId() == desc.groupId() : desc; + ctx.discovery().clientCacheStartEvent(req.requestId(), F.asMap(cacheName, req), null); - grpDesc.onCacheStopped(desc.cacheName(), desc.cacheId()); + return true; + } + } + } - if (!grpDesc.hasCaches()) { - registeredCacheGrps.remove(grpDesc.groupId()); + return false; + } - ctx.discovery().removeCacheGroup(grpDesc); + /** + * @param exchangeActions Exchange actions to update. + * @param topVer Topology version. + * @param persistedCfgs {@code True} if process start of persisted caches during cluster activation. 
+ * @param res Accumulator for cache change process results. + * @param req Cache change request. + * @param cacheName Cache name. + * @return True if there was no errors. + */ + private boolean processStartNewCacheRequest( + ExchangeActions exchangeActions, + AffinityTopologyVersion topVer, + boolean persistedCfgs, + CacheChangeProcessResult res, + DynamicCacheChangeRequest req, + String cacheName + ) { + String conflictErr = checkCacheConflict(req.startCacheConfiguration()); - exchangeActions.addCacheGroupToStop(grpDesc, req.destroy()); + if (conflictErr != null) { + U.warn(log, "Ignore cache start request. " + conflictErr); - assert exchangeActions.checkStopRequestConsistency(grpDesc.groupId()); + IgniteCheckedException err = new IgniteCheckedException("Failed to start " + + "cache. " + conflictErr); - // If all caches in group will be destroyed it is not necessary to destroy single cache - // because group will be stopped anyway. - if (req.destroy()) { - for (ExchangeActions.CacheActionData action : exchangeActions.cacheStopRequests()) { - if (action.descriptor().groupId() == grpDesc.groupId()) - action.request().destroy(false); - } - } - } - } - } + if (persistedCfgs) + res.errs.add(err); else - assert false : req; + ctx.cache().completeCacheStartFuture(req, false, err); - if (!needExchange) { - if (!clientCacheStart && ctx.localNodeId().equals(req.initiatingNodeId())) - reqsToComplete.add(new T2<>(req, waitTopVer)); - } - else - res.needExchange = true; + return false; } - if (!F.isEmpty(res.addedDescs)) { - AffinityTopologyVersion startTopVer = res.needExchange ? 
topVer.nextMinorVersion() : topVer; + SchemaOperationException err = QueryUtils.checkQueryEntityConflicts( + req.startCacheConfiguration(), registeredCaches.values()); - for (DynamicCacheDescriptor desc : res.addedDescs) { - assert desc.template() || res.needExchange; + if (err != null) { + if (persistedCfgs) + res.errs.add(err); + else + ctx.cache().completeCacheStartFuture(req, false, err); - desc.startTopologyVersion(startTopVer); - } + return false; } - if (!F.isEmpty(reqsToComplete)) { - ctx.closure().callLocalSafe(new Callable() { - @Override public Void call() throws Exception { - for (T2 t : reqsToComplete) { - final DynamicCacheChangeRequest req = t.get1(); - AffinityTopologyVersion waitTopVer = t.get2(); + CacheConfiguration ccfg = req.startCacheConfiguration(); - IgniteInternalFuture fut = waitTopVer != null ? - ctx.cache().context().exchange().affinityReadyFuture(waitTopVer) : null; + assert req.cacheType() != null : req; + assert F.eq(ccfg.getName(), cacheName) : req; - if (fut == null || fut.isDone()) - ctx.cache().completeCacheStartFuture(req, false, null); - else { - fut.listen(new IgniteInClosure>() { - @Override public void apply(IgniteInternalFuture fut) { - ctx.cache().completeCacheStartFuture(req, false, null); - } - }); - } - } + int cacheId = CU.cacheId(cacheName); - return null; - } - }); + CacheGroupDescriptor grpDesc = registerCacheGroup(exchangeActions, + topVer, + ccfg, + cacheId, + req.initiatingNodeId(), + req.deploymentId()); + + DynamicCacheDescriptor startDesc = new DynamicCacheDescriptor(ctx, + ccfg, + req.cacheType(), + grpDesc, + false, + req.initiatingNodeId(), + false, + req.sql(), + req.deploymentId(), + req.schema()); + + DynamicCacheDescriptor old = registeredCaches.put(ccfg.getName(), startDesc); + + restartingCaches.remove(ccfg.getName()); + + assert old == null; + + ctx.discovery().setCacheFilter( + startDesc.cacheId(), + grpDesc.groupId(), + ccfg.getName(), + ccfg.getNearConfiguration() != null); + + if (!persistedCfgs) 
{ + ctx.discovery().addClientNode(cacheName, + req.initiatingNodeId(), + req.nearCacheConfiguration() != null); } - return res; + res.addedDescs.add(startDesc); + + exchangeActions.addCacheToStart(req, startDesc); + + return true; } /** @@ -755,7 +967,7 @@ boolean hasRestartingCaches() { * @return Collection of currently restarting caches. */ Collection restartingCaches() { - return restartingCaches; + return restartingCaches.keySet(); } /** @@ -963,7 +1175,7 @@ private CacheNodeCommonDiscoveryData collectCommonDiscoveryData() { templates.put(desc.cacheName(), cacheData); } - Collection restarting = new HashSet<>(restartingCaches); + Collection restarting = new HashSet<>(restartingCaches.keySet()); return new CacheNodeCommonDiscoveryData(caches, templates, @@ -987,54 +1199,77 @@ public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { // CacheGroup configurations that were created from local node configuration. Map locCacheGrps = new HashMap<>(registeredCacheGroups()); - // Replace locally registered data with actual data received from cluster. - registeredCaches.clear(); - registeredCacheGrps.clear(); - ctx.discovery().cleanCachesAndGroups(); + //Replace locally registered data with actual data received from cluster. 
+ cleanCachesAndGroups(); - for (CacheGroupData grpData : cachesData.cacheGroups().values()) { - CacheGroupDescriptor grpDesc = new CacheGroupDescriptor( - grpData.config(), - grpData.groupName(), - grpData.groupId(), - grpData.receivedFrom(), - grpData.startTopologyVersion(), - grpData.deploymentId(), - grpData.caches(), - grpData.persistenceEnabled(), - grpData.walEnabled(), - grpData.walChangeRequests()); + registerReceivedCacheGroups(cachesData, locCacheGrps); - if (locCacheGrps.containsKey(grpDesc.groupId())) { - CacheGroupDescriptor locGrpCfg = locCacheGrps.get(grpDesc.groupId()); + registerReceivedCacheTemplates(cachesData); - grpDesc.mergeWith(locGrpCfg); - } + registerReceivedCaches(cachesData); - CacheGroupDescriptor old = registeredCacheGrps.put(grpDesc.groupId(), grpDesc); + addReceivedClientNodesToDiscovery(cachesData); - assert old == null : old; + String conflictErr = validateRegisteredCaches(); - ctx.discovery().addCacheGroup(grpDesc, - grpData.config().getNodeFilter(), - grpData.config().getCacheMode()); + gridData = new GridData(joinDiscoData, cachesData, conflictErr); + + if (cachesOnDisconnect == null || cachesOnDisconnect.clusterActive()) + initStartCachesForLocalJoin(false, disconnectedState()); + } + + /** + * Validation {@link #registeredCaches} on conflicts. + * + * @return Error message if conflicts was found. + */ + @Nullable private String validateRegisteredCaches() { + String conflictErr = null; + + if (joinDiscoData != null) { + for (Map.Entry e : joinDiscoData.caches().entrySet()) { + if (!registeredCaches.containsKey(e.getKey())) { + conflictErr = checkCacheConflict(e.getValue().cacheData().config()); + + if (conflictErr != null) { + conflictErr = "Failed to start configured cache due to conflict with started caches. 
" + + conflictErr; + + break; + } + } + } } - for (CacheData cacheData : cachesData.templates().values()) { - DynamicCacheDescriptor desc = new DynamicCacheDescriptor( - ctx, - cacheData.cacheConfiguration(), - cacheData.cacheType(), - null, - true, - cacheData.receivedFrom(), - cacheData.staticallyConfigured(), - false, - cacheData.deploymentId(), - cacheData.schema()); + return conflictErr; + } - registeredTemplates.put(cacheData.cacheConfiguration().getName(), desc); + /** + * Adding received client nodes to discovery if needed. + * + * @param cachesData Data received from cluster. + */ + private void addReceivedClientNodesToDiscovery(CacheNodeCommonDiscoveryData cachesData) { + if (!F.isEmpty(cachesData.clientNodesMap())) { + for (Map.Entry> entry : cachesData.clientNodesMap().entrySet()) { + String cacheName = entry.getKey(); + + for (Map.Entry tup : entry.getValue().entrySet()) + ctx.discovery().addClientNode(cacheName, tup.getKey(), tup.getValue()); + } } + } + + /** + * Register caches received from cluster. + * + * @param cachesData Data received from cluster. 
+ */ + private void registerReceivedCaches(CacheNodeCommonDiscoveryData cachesData) { + Map patchesToApply = new HashMap<>(); + Collection cachesToSave = new HashSet<>(); + + boolean hasSchemaPatchConflict = false; for (CacheData cacheData : cachesData.caches().values()) { CacheGroupDescriptor grpDesc = registeredCacheGrps.get(cacheData.groupId()); @@ -1053,7 +1288,22 @@ public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { cacheData.staticallyConfigured(), cacheData.sql(), cacheData.deploymentId(), - cacheData.schema()); + new QuerySchema(cacheData.schema().entities()) + ); + + Collection localQueryEntities = getLocalQueryEntities(cfg.getName()); + + QuerySchemaPatch schemaPatch = desc.makeSchemaPatch(localQueryEntities); + + if (schemaPatch.hasConflicts()) { + hasSchemaPatchConflict = true; + + log.warning("Skipping apply patch because conflicts : " + schemaPatch.getConflictsMessage()); + } + else if (!schemaPatch.isEmpty()) + patchesToApply.put(desc, schemaPatch); + else if (!GridFunc.eqNotOrdered(desc.schema().entities(), localQueryEntities)) + cachesToSave.add(desc); //received config is different of local config - need to resave desc.receivedOnDiscovery(true); @@ -1066,36 +1316,140 @@ public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { cfg.getNearConfiguration() != null); } - if (!F.isEmpty(cachesData.clientNodesMap())) { - for (Map.Entry> entry : cachesData.clientNodesMap().entrySet()) { - String cacheName = entry.getKey(); + updateRegisteredCachesIfNeeded(patchesToApply, cachesToSave, hasSchemaPatchConflict); + } - for (Map.Entry tup : entry.getValue().entrySet()) - ctx.discovery().addClientNode(cacheName, tup.getKey(), tup.getValue()); + /** + * Merging config or resaving it if it needed. + * + * @param patchesToApply Patches which need to apply. + * @param cachesToSave Caches which need to resave. + * @param hasSchemaPatchConflict {@code true} if we have conflict during making patch. 
+ */ + private void updateRegisteredCachesIfNeeded(Map patchesToApply, + Collection cachesToSave, boolean hasSchemaPatchConflict) { + //Skip merge of config if least one conflict was found. + if (!hasSchemaPatchConflict && isMergeConfigSupports(ctx.discovery().localNode())) { + boolean isClusterActive = ctx.state().clusterState().active(); + + //Merge of config for cluster only for inactive grid. + if (!isClusterActive && !patchesToApply.isEmpty()) { + for (Map.Entry entry : patchesToApply.entrySet()) { + if (entry.getKey().applySchemaPatch(entry.getValue())) + saveCacheConfiguration(entry.getKey()); + } + + for (DynamicCacheDescriptor descriptor : cachesToSave) { + saveCacheConfiguration(descriptor); + } + } + else if (patchesToApply.isEmpty()) { + for (DynamicCacheDescriptor descriptor : cachesToSave) { + saveCacheConfiguration(descriptor); + } } } + } - String conflictErr = null; + /** + * Register cache templates received from cluster. + * + * @param cachesData Data received from cluster. + */ + private void registerReceivedCacheTemplates(CacheNodeCommonDiscoveryData cachesData) { + for (CacheData cacheData : cachesData.templates().values()) { + DynamicCacheDescriptor desc = new DynamicCacheDescriptor( + ctx, + cacheData.cacheConfiguration(), + cacheData.cacheType(), + null, + true, + cacheData.receivedFrom(), + cacheData.staticallyConfigured(), + false, + cacheData.deploymentId(), + cacheData.schema()); - if (joinDiscoData != null) { - for (Map.Entry e : joinDiscoData.caches().entrySet()) { - if (!registeredCaches.containsKey(e.getKey())) { - conflictErr = checkCacheConflict(e.getValue().cacheData().config()); + registeredTemplates.put(cacheData.cacheConfiguration().getName(), desc); + } + } - if (conflictErr != null) { - conflictErr = "Failed to start configured cache due to conflict with started caches. " + - conflictErr; + /** + * Register cache groups received from cluster. + * + * @param cachesData Data received from cluster. 
+ * @param locCacheGrps Current local cache groups. + */ + private void registerReceivedCacheGroups(CacheNodeCommonDiscoveryData cachesData, + Map locCacheGrps) { + for (CacheGroupData grpData : cachesData.cacheGroups().values()) { + CacheGroupDescriptor grpDesc = new CacheGroupDescriptor( + grpData.config(), + grpData.groupName(), + grpData.groupId(), + grpData.receivedFrom(), + grpData.startTopologyVersion(), + grpData.deploymentId(), + grpData.caches(), + grpData.persistenceEnabled(), + grpData.walEnabled(), + grpData.walChangeRequests()); - break; - } - } + if (locCacheGrps.containsKey(grpDesc.groupId())) { + CacheGroupDescriptor locGrpCfg = locCacheGrps.get(grpDesc.groupId()); + + grpDesc.mergeWith(locGrpCfg); } + + CacheGroupDescriptor old = registeredCacheGrps.put(grpDesc.groupId(), grpDesc); + + assert old == null : old; + + ctx.discovery().addCacheGroup(grpDesc, + grpData.config().getNodeFilter(), + grpData.config().getCacheMode()); } + } - gridData = new GridData(joinDiscoData, cachesData, conflictErr); + /** + * Clean local registered caches and groups + */ + private void cleanCachesAndGroups() { + registeredCaches.clear(); + registeredCacheGrps.clear(); + ctx.discovery().cleanCachesAndGroups(); + } - if (cachesOnDisconnect == null || cachesOnDisconnect.clusterActive()) - initStartCachesForLocalJoin(false, disconnectedState()); + /** + * Save dynamic cache descriptor on disk. + * + * @param desc Cache to save. + */ + private void saveCacheConfiguration(DynamicCacheDescriptor desc) { + try { + ctx.cache().saveCacheConfiguration(desc); + } + catch (IgniteCheckedException e) { + log.error("Error while saving cache configuration to disk, cfg = " + desc.cacheConfiguration(), e); + } + } + + /** + * Get started node query entities by cacheName. + * + * @param cacheName Cache for which query entities will be returned. + * @return Local query entities. 
+ */ + private Collection getLocalQueryEntities(String cacheName) { + if (joinDiscoData == null) + return Collections.emptyList(); + + CacheJoinNodeDiscoveryData.CacheInfo cacheInfo = joinDiscoData.caches().get(cacheName); + + if (cacheInfo == null) + return Collections.emptyList(); + + return cacheInfo.cacheData().queryEntities(); } /** @@ -1105,8 +1459,6 @@ public void onGridDataReceived(DiscoveryDataBag.GridDiscoveryData data) { * @param firstNode {@code True} if first node in cluster starts. */ private void initStartCachesForLocalJoin(boolean firstNode, boolean reconnect) { - assert locJoinCachesCtx == null : locJoinCachesCtx; - if (ctx.state().clusterState().transition()) { joinOnTransition = true; @@ -1144,7 +1496,7 @@ private void initStartCachesForLocalJoin(boolean firstNode, boolean reconnect) { desc.staticallyConfigured(), desc.sql(), desc.deploymentId(), - new QuerySchema(locCfg.cacheData().queryEntities())); + desc.schema().copy()); desc0.startTopologyVersion(desc.startTopologyVersion()); desc0.receivedFromStartVersion(desc.receivedFromStartVersion()); @@ -1190,7 +1542,8 @@ public void onStateChangeFinish(ChangeGlobalStateFinishMessage msg) { * @return Exchange action. * @throws IgniteCheckedException If configuration validation failed. */ - public ExchangeActions onStateChangeRequest(ChangeGlobalStateMessage msg, AffinityTopologyVersion topVer, DiscoveryDataClusterState curState) + public ExchangeActions onStateChangeRequest(ChangeGlobalStateMessage msg, AffinityTopologyVersion topVer, + DiscoveryDataClusterState curState) throws IgniteCheckedException { ExchangeActions exchangeActions = new ExchangeActions(); @@ -1306,6 +1659,53 @@ else if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) } } + /** + * @param data Joining node data. + * @return Message with error or null if everything was OK. 
+ */ + public String validateJoiningNodeData(DiscoveryDataBag.JoiningNodeDiscoveryData data) { + if (data.hasJoiningNodeData()) { + Serializable joiningNodeData = data.joiningNodeData(); + + if (joiningNodeData instanceof CacheJoinNodeDiscoveryData) { + CacheJoinNodeDiscoveryData joinData = (CacheJoinNodeDiscoveryData)joiningNodeData; + + Set problemCaches = null; + + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { + CacheConfiguration cfg = cacheInfo.cacheData().config(); + + if (!registeredCaches.containsKey(cfg.getName())) { + String conflictErr = checkCacheConflict(cfg); + + if (conflictErr != null) { + U.warn(log, "Ignore cache received from joining node. " + conflictErr); + + continue; + } + + long flags = cacheInfo.getFlags(); + + if (flags == 1L) { + if (problemCaches == null) + problemCaches = new HashSet<>(); + + problemCaches.add(cfg.getName()); + } + } + } + + if (!F.isEmpty(problemCaches)) + return problemCaches.stream().collect(Collectors.joining(", ", + "Joining node has caches with data which are not presented on cluster, " + + "it could mean that they were already destroyed, to add the node to cluster - " + + "remove directories with the caches[", "]")); + } + } + + return null; + } + /** * @param clientData Discovery data. * @param clientNodeId Client node ID. @@ -1385,26 +1785,14 @@ private String checkCacheConflict(CacheConfiguration cfg) { * @return Configuration conflict error. 
*/ private String processJoiningNode(CacheJoinNodeDiscoveryData joinData, UUID nodeId, boolean locJoin) { - for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.templates().values()) { - CacheConfiguration cfg = cacheInfo.cacheData().config(); + registerNewCacheTemplates(joinData, nodeId); - if (!registeredTemplates.containsKey(cfg.getName())) { - DynamicCacheDescriptor desc = new DynamicCacheDescriptor(ctx, - cfg, - cacheInfo.cacheType(), - null, - true, - nodeId, - true, - false, - joinData.cacheDeploymentId(), - new QuerySchema(cacheInfo.cacheData().queryEntities())); + Map patchesToApply = new HashMap<>(); - DynamicCacheDescriptor old = registeredTemplates.put(cfg.getName(), desc); + boolean hasSchemaPatchConflict = false; + boolean active = ctx.state().clusterState().active(); - assert old == null : old; - } - } + boolean isMergeConfigSupport = isMergeConfigSupports(null); for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.caches().values()) { CacheConfiguration cfg = cacheInfo.cacheData().config(); @@ -1421,49 +1809,138 @@ private String processJoiningNode(CacheJoinNodeDiscoveryData joinData, UUID node continue; } - int cacheId = CU.cacheId(cfg.getName()); + registerNewCache(joinData, nodeId, cacheInfo); + } + else if (!active && isMergeConfigSupport) { + DynamicCacheDescriptor desc = registeredCaches.get(cfg.getName()); - CacheGroupDescriptor grpDesc = registerCacheGroup(null, - null, - cfg, - cacheId, + QuerySchemaPatch schemaPatch = desc.makeSchemaPatch(cacheInfo.cacheData().queryEntities()); + + if (schemaPatch.hasConflicts()) { + hasSchemaPatchConflict = true; + + log.error("Error during making schema patch : " + schemaPatch.getConflictsMessage()); + } + else if (!schemaPatch.isEmpty() && !hasSchemaPatchConflict) + patchesToApply.put(desc, schemaPatch); + } + + ctx.discovery().addClientNode(cfg.getName(), nodeId, cfg.getNearConfiguration() != null); + } + + //If conflict was detected we don't merge config and we leave existed 
config. + if (!hasSchemaPatchConflict && !patchesToApply.isEmpty()) + for (Map.Entry entry : patchesToApply.entrySet()) { + if (entry.getKey().applySchemaPatch(entry.getValue())) + saveCacheConfiguration(entry.getKey()); + } + + if (joinData.startCaches()) { + for (DynamicCacheDescriptor desc : registeredCaches.values()) { + ctx.discovery().addClientNode(desc.cacheName(), nodeId, - joinData.cacheDeploymentId()); + desc.cacheConfiguration().getNearConfiguration() != null); + } + } + + return null; + } + + /** + * Register new cache received from joining node. + * + * @param joinData Data from joining node. + * @param nodeId Joining node id. + * @param cacheInfo Cache info of new node. + */ + private void registerNewCache( + CacheJoinNodeDiscoveryData joinData, + UUID nodeId, + CacheJoinNodeDiscoveryData.CacheInfo cacheInfo) { + CacheConfiguration cfg = cacheInfo.cacheData().config(); + + int cacheId = CU.cacheId(cfg.getName()); + + CacheGroupDescriptor grpDesc = registerCacheGroup(null, + null, + cfg, + cacheId, + nodeId, + joinData.cacheDeploymentId()); + + ctx.discovery().setCacheFilter( + cacheId, + grpDesc.groupId(), + cfg.getName(), + cfg.getNearConfiguration() != null); + + DynamicCacheDescriptor desc = new DynamicCacheDescriptor(ctx, + cfg, + cacheInfo.cacheType(), + grpDesc, + false, + nodeId, + cacheInfo.isStaticallyConfigured(), + cacheInfo.sql(), + joinData.cacheDeploymentId(), + new QuerySchema(cacheInfo.cacheData().queryEntities())); + + DynamicCacheDescriptor old = registeredCaches.put(cfg.getName(), desc); + + assert old == null : old; + } - ctx.discovery().setCacheFilter( - cacheId, - grpDesc.groupId(), - cfg.getName(), - cfg.getNearConfiguration() != null); + /** + * Register new cache templates received from joining node. + * + * @param joinData Data from joining node. + * @param nodeId Joining node id. 
+ */ + private void registerNewCacheTemplates(CacheJoinNodeDiscoveryData joinData, UUID nodeId) { + for (CacheJoinNodeDiscoveryData.CacheInfo cacheInfo : joinData.templates().values()) { + CacheConfiguration cfg = cacheInfo.cacheData().config(); + if (!registeredTemplates.containsKey(cfg.getName())) { DynamicCacheDescriptor desc = new DynamicCacheDescriptor(ctx, cfg, cacheInfo.cacheType(), - grpDesc, - false, + null, + true, nodeId, true, - cacheInfo.sql(), + false, joinData.cacheDeploymentId(), new QuerySchema(cacheInfo.cacheData().queryEntities())); - DynamicCacheDescriptor old = registeredCaches.put(cfg.getName(), desc); + DynamicCacheDescriptor old = registeredTemplates.put(cfg.getName(), desc); assert old == null : old; } - - ctx.discovery().addClientNode(cfg.getName(), nodeId, cfg.getNearConfiguration() != null); } + } - if (joinData.startCaches()) { - for (DynamicCacheDescriptor desc : registeredCaches.values()) { - ctx.discovery().addClientNode(desc.cacheName(), - nodeId, - desc.cacheConfiguration().getNearConfiguration() != null); - } + /** + * @return {@code true} if grid supports merge of config and {@code False} otherwise. 
+ */ + public boolean isMergeConfigSupports(ClusterNode joiningNode) { + DiscoCache discoCache = ctx.discovery().discoCache(); + + if (discoCache == null) + return true; + + if (joiningNode != null && joiningNode.version().compareToIgnoreTimestamp(V_MERGE_CONFIG_SINCE) < 0) + return false; + + Collection nodes = discoCache.allNodes(); + + for (ClusterNode node : nodes) { + IgniteProductVersion version = node.version(); + + if (version.compareToIgnoreTimestamp(V_MERGE_CONFIG_SINCE) < 0) + return false; } - return null; + return true; } /** @@ -1526,7 +2003,7 @@ private CacheGroupDescriptor registerCacheGroup( Map caches = Collections.singletonMap(startedCacheCfg.getName(), cacheId); - boolean persistent = CU.isPersistentCache(startedCacheCfg, ctx.config().getDataStorageConfiguration()); + boolean persistent = resolvePersistentFlag(exchActions, startedCacheCfg); CacheGroupDescriptor grpDesc = new CacheGroupDescriptor( startedCacheCfg, @@ -1555,6 +2032,60 @@ private CacheGroupDescriptor registerCacheGroup( return grpDesc; } + /** + * Resolves persistent flag for new cache group descriptor. + * + * @param exchActions Optional exchange actions to update if new group was added. + * @param startedCacheCfg Started cache configuration. + */ + private boolean resolvePersistentFlag(@Nullable ExchangeActions exchActions, + CacheConfiguration startedCacheCfg) { + if (!ctx.clientNode()) { + // On server, we always can determine whether cache is persistent by local storage configuration. + return CU.isPersistentCache(startedCacheCfg, ctx.config().getDataStorageConfiguration()); + } + else if (exchActions == null) { + // It's either client local join event or cache is statically configured on another node. + // No need to resolve on client - we'll anyway receive group descriptor from server with correct flag. + return false; + } + else { + // Dynamic cache start. Initiator of the start may not have known whether cache should be persistent. 
+ // On client, we should peek attributes of any affinity server node to get data storage configuration. + Collection aliveSrvNodes = ctx.discovery().aliveServerNodes(); + + assert !aliveSrvNodes.isEmpty() : "No alive server nodes"; + + for (ClusterNode srvNode : aliveSrvNodes) { + if (CU.affinityNode(srvNode, startedCacheCfg.getNodeFilter())) { + Object dsCfgBytes = srvNode.attribute(IgniteNodeAttributes.ATTR_DATA_STORAGE_CONFIG); + + if (dsCfgBytes instanceof byte[]) { + try { + DataStorageConfiguration crdDsCfg = new JdkMarshaller().unmarshal( + (byte[])dsCfgBytes, U.resolveClassLoader(ctx.config())); + + return CU.isPersistentCache(startedCacheCfg, crdDsCfg); + } + catch (IgniteCheckedException e) { + U.error(log, "Failed to unmarshal remote data storage configuration [remoteNode=" + + srvNode + ", cacheName=" + startedCacheCfg.getName() + "]", e); + } + } + else { + U.error(log, "Remote marshalled data storage configuration is absent [remoteNode=" + srvNode + + ", cacheName=" + startedCacheCfg.getName() + ", dsCfg=" + dsCfgBytes + "]"); + } + } + } + + U.error(log, "Failed to find affinity server node with data storage configuration for starting cache " + + "[cacheName=" + startedCacheCfg.getName() + ", aliveSrvNodes=" + aliveSrvNodes + "]"); + + return false; + } + } + /** * @param ccfg Cache configuration to start. * @throws IgniteCheckedException If failed. @@ -1640,6 +2171,7 @@ ConcurrentMap registeredCacheGroups() { /** * Returns registered cache descriptors ordered by {@code comparator} + * * @param comparator Comparator (DIRECT, REVERSE or custom) to order cache descriptors. * @return Ordered by comparator cache descriptors. 
*/ @@ -1679,7 +2211,6 @@ public ClusterCachesReconnectResult onReconnected(boolean active, boolean transi Set stoppedCacheGrps = new HashSet<>(); Set survivedCaches = new HashSet<>(); - Set survivedCacheGrps = new HashSet<>(); if (!active) { joinOnTransition = transition; @@ -1722,11 +2253,8 @@ public ClusterCachesReconnectResult onReconnected(boolean active, boolean transi if (stopped) stoppedCacheGrps.add(locDesc.groupId()); - else { + else assert locDesc.groupId() == desc.groupId(); - - survivedCacheGrps.add(locDesc.groupId()); - } } for (Map.Entry e : cachesOnDisconnect.caches.entrySet()) { @@ -1751,7 +2279,6 @@ public ClusterCachesReconnectResult onReconnected(boolean active, boolean transi } if (locJoinCachesCtx != null) { - locJoinCachesCtx.removeSurvivedCacheGroups(survivedCacheGrps); locJoinCachesCtx.removeSurvivedCaches(survivedCaches); if (locJoinCachesCtx.isEmpty()) @@ -1789,6 +2316,28 @@ private boolean surviveReconnect(String cacheName) { return CU.isUtilityCache(cacheName); } + /** + * @param cacheName Cache name. + * @return {@code True} if cache is restarting. + */ + public boolean isRestarting(String cacheName) { + return restartingCaches.containsKey(cacheName); + } + + /** + * @param cacheName Cache name which restart were cancelled. + */ + public void removeRestartingCache(String cacheName) { + restartingCaches.remove(cacheName); + } + + /** + * Clear up information about restarting caches. + */ + public void removeRestartingCaches() { + restartingCaches.clear(); + } + /** * Holds direct comparator (first system caches) and reverse comparator (first user caches). * Use DIRECT comparator for ordering cache start operations. 
diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java index 83459a5c03589..d85e29b673fe2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeBatch.java @@ -76,6 +76,11 @@ public DynamicCacheChangeBatch(Collection reqs) { return false; } + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + /** {@inheritDoc} */ @Override public DiscoCache createDiscoCache(GridDiscoveryManager mgr, AffinityTopologyVersion topVer, DiscoCache discoCache) { diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeFailureMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeFailureMessage.java new file mode 100644 index 0000000000000..d0cb08da4a1e1 --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeFailureMessage.java @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.processors.cache; + +import java.util.Collection; +import org.apache.ignite.IgniteCheckedException; +import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.internal.managers.discovery.DiscoCache; +import org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage; +import org.apache.ignite.internal.managers.discovery.GridDiscoveryManager; +import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; +import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionExchangeId; +import org.apache.ignite.internal.util.tostring.GridToStringInclude; +import org.apache.ignite.internal.util.typedef.F; +import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.lang.IgniteUuid; +import org.jetbrains.annotations.Nullable; + +/** + * This class represents discovery message that is used to provide information about dynamic cache start failure. + */ +public class DynamicCacheChangeFailureMessage implements DiscoveryCustomMessage { + /** */ + private static final long serialVersionUID = 0L; + + /** Cache names. */ + @GridToStringInclude + private Collection cacheNames; + + /** Custom message ID. */ + private IgniteUuid id; + + /** */ + private GridDhtPartitionExchangeId exchId; + + /** */ + @GridToStringInclude + private IgniteCheckedException cause; + + /** Cache updates to be executed on exchange. */ + private transient ExchangeActions exchangeActions; + + /** + * Creates new DynamicCacheChangeFailureMessage instance. + * + * @param locNode Local node. + * @param exchId Exchange Id. + * @param cause Cache start error. + * @param cacheNames Cache names. 
+ */ + public DynamicCacheChangeFailureMessage( + ClusterNode locNode, + GridDhtPartitionExchangeId exchId, + IgniteCheckedException cause, + Collection cacheNames) + { + assert exchId != null; + assert cause != null; + assert !F.isEmpty(cacheNames) : cacheNames; + + this.id = IgniteUuid.fromUuid(locNode.id()); + this.exchId = exchId; + this.cause = cause; + this.cacheNames = cacheNames; + } + + /** {@inheritDoc} */ + @Override public IgniteUuid id() { + return id; + } + + /** + * @return Collection of failed caches. + */ + public Collection cacheNames() { + return cacheNames; + } + + /** + * @return Cache start error. + */ + public IgniteCheckedException error() { + return cause; + } + + /** + * @return Cache updates to be executed on exchange. + */ + public ExchangeActions exchangeActions() { + return exchangeActions; + } + + /** + * @param exchangeActions Cache updates to be executed on exchange. + */ + public void exchangeActions(ExchangeActions exchangeActions) { + assert exchangeActions != null && !exchangeActions.empty() : exchangeActions; + + this.exchangeActions = exchangeActions; + } + + /** + * @return Exchange version. 
+ */ + @Nullable public GridDhtPartitionExchangeId exchangeId() { + return exchId; + } + + /** {@inheritDoc} */ + @Nullable @Override public DiscoveryCustomMessage ackMessage() { + return null; + } + + /** {@inheritDoc} */ + @Override public boolean isMutable() { + return false; + } + + /** {@inheritDoc} */ + @Override public boolean stopProcess() { + return false; + } + + /** {@inheritDoc} */ + @Override public DiscoCache createDiscoCache( + GridDiscoveryManager mgr, + AffinityTopologyVersion topVer, + DiscoCache discoCache) { + return mgr.createDiscoCacheOnCacheChange(topVer, discoCache); + } + + /** {@inheritDoc} */ + @Override public String toString() { + return S.toString(DynamicCacheChangeFailureMessage.class, this); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java index 2b942b09aa884..3dbee2a9b140a 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheChangeRequest.java @@ -17,6 +17,8 @@ package org.apache.ignite.internal.processors.cache; +import java.io.Serializable; +import java.util.UUID; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.internal.GridKernalContext; @@ -25,9 +27,6 @@ import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; -import java.io.Serializable; -import java.util.UUID; - /** * Cache start/stop request. */ @@ -68,6 +67,9 @@ public class DynamicCacheChangeRequest implements Serializable { /** Restart flag. */ private boolean restart; + /** Restart operation id. 
*/ + private IgniteUuid restartId; + /** Cache active on start or not*/ private boolean disabledAfterStart; @@ -261,6 +263,20 @@ public void restart(boolean restart) { this.restart = restart; } + /** + * @return Id of restart to allow only initiator start the restarting cache. + */ + public IgniteUuid restartId() { + return restartId; + } + + /** + * @param restartId Id of cache restart requester. + */ + public void restartId(IgniteUuid restartId) { + this.restartId = restartId; + } + /** * @return Cache name. */ @@ -378,7 +394,7 @@ public void receivedFrom(UUID nodeId) { /** * @return ID of node provided cache configuration in discovery data. */ - @Nullable public UUID receivedFrom() { + public @Nullable UUID receivedFrom() { return rcvdFrom; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheDescriptor.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheDescriptor.java index cad84144b519f..93882a253bf65 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheDescriptor.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/DynamicCacheDescriptor.java @@ -17,14 +17,17 @@ package org.apache.ignite.internal.processors.cache; +import java.util.Collection; import java.util.UUID; import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheMode; +import org.apache.ignite.cache.QueryEntity; import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.internal.GridKernalContext; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cacheobject.IgniteCacheObjectProcessor; import org.apache.ignite.internal.processors.query.QuerySchema; +import org.apache.ignite.internal.processors.query.QuerySchemaPatch; import org.apache.ignite.internal.processors.query.schema.message.SchemaFinishDiscoveryMessage; import 
org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.internal.CU; @@ -345,6 +348,31 @@ public void schemaChangeFinish(SchemaFinishDiscoveryMessage msg) { } } + /** + * Make schema patch for this cache. + * + * @param target Query entity list which current schema should be expanded to. + * @return Patch which contains operations for expanding schema of this cache. + * @see QuerySchemaPatch + */ + public QuerySchemaPatch makeSchemaPatch(Collection target) { + synchronized (schemaMux) { + return schema.makePatch(target); + } + } + + /** + * Apply query schema patch for changing current schema. + * + * @param patch patch to apply. + * @return {@code true} if applying was success and {@code false} otherwise. + */ + public boolean applySchemaPatch(QuerySchemaPatch patch) { + synchronized (schemaMux) { + return schema.applyPatch(patch); + } + } + /** * Form a {@link StoredCacheData} with all data to correctly restore cache params when its configuration is read * from page store. Essentially, this method takes from {@link DynamicCacheDescriptor} all that's needed to start diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeActions.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeActions.java index bcf3f408c222f..6431d0f6c21a9 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeActions.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeActions.java @@ -99,17 +99,18 @@ public Collection cacheStartRequests() { /** * @return Stop cache requests. */ - Collection cacheStopRequests() { + public Collection cacheStopRequests() { return cachesToStop != null ? cachesToStop.values() : Collections.emptyList(); } /** * @param ctx Context. + * @param err Error if any. 
*/ - public void completeRequestFutures(GridCacheSharedContext ctx) { - completeRequestFutures(cachesToStart, ctx); - completeRequestFutures(cachesToStop, ctx); - completeRequestFutures(cachesToResetLostParts, ctx); + public void completeRequestFutures(GridCacheSharedContext ctx, Throwable err) { + completeRequestFutures(cachesToStart, ctx, err); + completeRequestFutures(cachesToStop, ctx, err); + completeRequestFutures(cachesToResetLostParts, ctx, err); } /** @@ -130,10 +131,14 @@ public boolean systemCachesStarting() { * @param map Actions map. * @param ctx Context. */ - private void completeRequestFutures(Map map, GridCacheSharedContext ctx) { + private void completeRequestFutures( + Map map, + GridCacheSharedContext ctx, + @Nullable Throwable err + ) { if (map != null) { for (CacheActionData req : map.values()) - ctx.cache().completeCacheStartFuture(req.req, true, null); + ctx.cache().completeCacheStartFuture(req.req, (err == null), err); } } @@ -409,7 +414,7 @@ public DynamicCacheDescriptor descriptor() { /** * */ - static class CacheGroupActionData { + public static class CacheGroupActionData { /** */ private final CacheGroupDescriptor desc; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java index 4046c98b29414..74e3bec8f5f09 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeContext.java @@ -103,7 +103,7 @@ public boolean fetchAffinityOnJoin() { /** * @param grpId Cache group ID. 
*/ - void addGroupAffinityRequestOnJoin(Integer grpId) { + synchronized void addGroupAffinityRequestOnJoin(Integer grpId) { if (requestGrpsAffOnJoin == null) requestGrpsAffOnJoin = new HashSet<>(); @@ -113,7 +113,7 @@ void addGroupAffinityRequestOnJoin(Integer grpId) { /** * @return Groups to request affinity for. */ - @Nullable public Set groupsAffinityRequestOnJoin() { + @Nullable public synchronized Set groupsAffinityRequestOnJoin() { return requestGrpsAffOnJoin; } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java index 0e7e01c131f26..85b7c06e0d972 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/ExchangeDiscoveryEvents.java @@ -18,6 +18,7 @@ package org.apache.ignite.internal.processors.cache; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.UUID; import org.apache.ignite.IgniteLogger; @@ -29,14 +30,12 @@ import org.apache.ignite.internal.managers.discovery.DiscoCache; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture; -import org.apache.ignite.internal.util.typedef.internal.CU; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import static org.apache.ignite.events.EventType.EVT_NODE_FAILED; import static org.apache.ignite.events.EventType.EVT_NODE_JOINED; import static org.apache.ignite.events.EventType.EVT_NODE_LEFT; -import static org.apache.ignite.internal.events.DiscoveryCustomEvent.EVT_DISCOVERY_CUSTOM_EVT; /** * Discovery events processed in single exchange (contain multiple events if exchanges for 
multiple @@ -59,7 +58,7 @@ public class ExchangeDiscoveryEvents { private DiscoveryEvent lastSrvEvt; /** All events. */ - private List evts = new ArrayList<>(); + private List evts = Collections.synchronizedList(new ArrayList<>()); /** Server join flag. */ private boolean srvJoin; @@ -78,11 +77,6 @@ public class ExchangeDiscoveryEvents { * @param fut Current exchange future. */ public void processEvents(GridDhtPartitionsExchangeFuture fut) { - for (DiscoveryEvent evt : evts) { - if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) - fut.sharedContext().mvcc().removeExplicitNodeLocks(evt.eventNode().id(), fut.initialVersion()); - } - if (hasServerLeft()) warnNoAffinityNodes(fut.sharedContext()); } @@ -127,7 +121,7 @@ void addEvent(AffinityTopologyVersion topVer, DiscoveryEvent evt, DiscoCache cac ClusterNode node = evt.eventNode(); - if (!CU.clientNode(node)) { + if (!node.isClient()) { lastSrvEvt = evt; srvEvtTopVer = new AffinityTopologyVersion(evt.topologyVersion(), 0); @@ -135,7 +129,7 @@ void addEvent(AffinityTopologyVersion topVer, DiscoveryEvent evt, DiscoCache cac if (evt.type()== EVT_NODE_JOINED) srvJoin = true; else if (evt.type() == EVT_NODE_LEFT || evt.type() == EVT_NODE_FAILED) - srvLeft = !CU.clientNode(node); + srvLeft = !node.isClient(); } } @@ -151,7 +145,7 @@ public List events() { * @return {@code True} if given event is {@link EventType#EVT_NODE_FAILED} or {@link EventType#EVT_NODE_LEFT}. */ public static boolean serverLeftEvent(DiscoveryEvent evt) { - return ((evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) && !CU.clientNode(evt.eventNode())); + return ((evt.type() == EVT_NODE_FAILED || evt.type() == EVT_NODE_LEFT) && !evt.eventNode().isClient()); } /** @@ -159,7 +153,7 @@ public static boolean serverLeftEvent(DiscoveryEvent evt) { * @return {@code True} if given event is {@link EventType#EVT_NODE_JOINED}. 
*/ public static boolean serverJoinEvent(DiscoveryEvent evt) { - return (evt.type() == EVT_NODE_JOINED && !CU.clientNode(evt.eventNode())); + return (evt.type() == EVT_NODE_JOINED && !evt.eventNode().isClient()); } /** @@ -224,6 +218,7 @@ public void warnNoAffinityNodes(GridCacheSharedContext cctx) { null, null, null, + null, false, null, false, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/FetchActiveTxOwnerTraceClosure.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/FetchActiveTxOwnerTraceClosure.java new file mode 100644 index 0000000000000..0c51c83c4f31e --- /dev/null +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/FetchActiveTxOwnerTraceClosure.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ignite.internal.processors.cache; + +import org.apache.ignite.lang.IgniteCallable; + +import java.lang.management.ManagementFactory; +import java.lang.management.ThreadInfo; +import java.lang.management.ThreadMXBean; + +/** + * Closure that is computed on near node to get the stack trace of active transaction owner thread. 
+ */ +public class FetchActiveTxOwnerTraceClosure implements IgniteCallable { + /** */ + private static final long serialVersionUID = 0L; + + /** */ + private static final StackTraceElement[] STACK_TRACE_ELEMENT_EMPTY = new StackTraceElement[0]; + + /** */ + private final long txOwnerThreadId; + + /** */ + public FetchActiveTxOwnerTraceClosure(long txOwnerThreadId) { + this.txOwnerThreadId = txOwnerThreadId; + } + + /** + * Builds the stack trace dump of the transaction owner thread + * + * @return stack trace dump string + * @throws Exception If failed + */ + @Override public String call() throws Exception { + StringBuilder traceDump = new StringBuilder("Stack trace of the transaction owner thread:\n"); + + for (StackTraceElement stackTraceElement : getStackTrace()) { + traceDump.append(stackTraceElement.toString()); + traceDump.append("\n"); + } + + return traceDump.toString(); + } + + /** + * Gets the stack trace of the transaction owner thread + * + * @return stack trace elements + */ + private StackTraceElement[] getStackTrace() { + ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean(); + + ThreadInfo threadInfo; + + try { + threadInfo = threadMXBean.getThreadInfo(txOwnerThreadId, Integer.MAX_VALUE); + } + catch (SecurityException | IllegalArgumentException ignored) { + threadInfo = null; + } + + return threadInfo == null ? 
STACK_TRACE_ELEMENT_EMPTY : threadInfo.getStackTrace(); + } +} diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java index 27fc39563e36a..4b657d2bcf9ea 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GatewayProtectedCacheProxy.java @@ -36,6 +36,7 @@ import javax.cache.processor.EntryProcessor; import javax.cache.processor.EntryProcessorResult; import org.apache.ignite.IgniteCache; +import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.cache.CacheEntry; import org.apache.ignite.cache.CacheEntryProcessor; import org.apache.ignite.cache.CacheMetrics; @@ -48,6 +49,7 @@ import org.apache.ignite.cache.query.SqlFieldsQuery; import org.apache.ignite.cluster.ClusterGroup; import org.apache.ignite.internal.AsyncSupportAdapter; +import org.apache.ignite.internal.GridKernalState; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.lang.IgniteBiPredicate; import org.apache.ignite.lang.IgniteClosure; @@ -138,15 +140,13 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy withExpiryPolicy(ExpiryPolicy plc) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return new GatewayProtectedCacheProxy<>(delegate, opCtx.withExpiryPolicy(plc), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } @@ -157,9 +157,7 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy skipStore() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = 
onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { boolean skip = opCtx.skipStore(); @@ -170,15 +168,13 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { return new GatewayProtectedCacheProxy<>(delegate, opCtx.setSkipStore(true), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy withNoRetries() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { boolean noRetries = opCtx.noRetries(); @@ -189,15 +185,13 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { return new GatewayProtectedCacheProxy<>(delegate, opCtx.setNoRetries(true), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy withPartitionRecover() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { boolean recovery = opCtx.recovery(); @@ -208,7 +202,7 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { return new GatewayProtectedCacheProxy<>(delegate, opCtx.setRecovery(true), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } @@ -219,23 +213,19 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy keepBinary() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return new GatewayProtectedCacheProxy<>((IgniteCacheProxy) delegate, opCtx.keepBinary(), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public GatewayProtectedCacheProxy withDataCenterId(byte dataCenterId) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, 
opCtx); + CacheOperationGate opGate = onEnter(); try { Byte prevDataCenterId = opCtx.dataCenterId(); @@ -246,91 +236,79 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { return new GatewayProtectedCacheProxy<>(delegate, opCtx.setDataCenterId(dataCenterId), lock); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void loadCache(@Nullable IgniteBiPredicate p, @Nullable Object... args) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.loadCache(p, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture loadCacheAsync(@Nullable IgniteBiPredicate p, @Nullable Object... args) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.loadCacheAsync(p, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void localLoadCache(@Nullable IgniteBiPredicate p, @Nullable Object... args) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.localLoadCache(p, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture localLoadCacheAsync(@Nullable IgniteBiPredicate p, @Nullable Object... 
args) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localLoadCacheAsync(p, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V getAndPutIfAbsent(K key, V val) throws CacheException, TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndPutIfAbsent(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture getAndPutIfAbsentAsync(K key, V val) throws CacheException, TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndPutIfAbsentAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } @@ -346,1093 +324,937 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public boolean isLocalLocked(K key, boolean byCurrThread) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.isLocalLocked(key, byCurrThread); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public QueryCursor query(Query qry) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.query(qry); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public FieldsQueryCursor> query(SqlFieldsQuery qry) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.query(qry); } finally { - 
onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public List>> queryMultipleStatements(SqlFieldsQuery qry) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.queryMultipleStatements(qry); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public QueryCursor query(Query qry, IgniteClosure transformer) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.query(qry, transformer); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Iterable> localEntries(CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localEntries(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public QueryMetrics queryMetrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.queryMetrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void resetQueryMetrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.resetQueryMetrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Collection queryDetailMetrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.queryDetailMetrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void 
resetQueryDetailMetrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.resetQueryDetailMetrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void localEvict(Collection keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.localEvict(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V localPeek(K key, CachePeekMode... peekModes) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localPeek(key, peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public int size(CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.size(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture sizeAsync(CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.sizeAsync(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public long sizeLong(CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.sizeLong(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture sizeLongAsync(CachePeekMode... 
peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.sizeLongAsync(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public long sizeLong(int partition, CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.sizeLong(partition, peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture sizeLongAsync(int partition, CachePeekMode... peekModes) throws CacheException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.sizeLongAsync(partition, peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public int localSize(CachePeekMode... peekModes) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localSize(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public long localSizeLong(CachePeekMode... peekModes) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localSizeLong(peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public long localSizeLong(int partition, CachePeekMode... 
peekModes) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localSizeLong(partition, peekModes); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Map> invokeAll(Map> map, Object... args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAll(map, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture>> invokeAllAsync(Map> map, Object... args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAllAsync(map, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V get(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.get(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture getAsync(K key) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public CacheEntry getEntry(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getEntry(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture> getEntryAsync(K key) throws TransactionException { - GridCacheGateway 
gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getEntryAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Map getAll(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAll(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture> getAllAsync(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAllAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Collection> getEntries(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getEntries(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture>> getEntriesAsync(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getEntriesAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Map getAllOutTx(Set keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAllOutTx(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture> getAllOutTxAsync(Set keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + 
CacheOperationGate opGate = onEnter(); try { return delegate.getAllOutTxAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean containsKey(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.containsKey(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void loadAll(Set keys, boolean replaceExisting, CompletionListener completionListener) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.loadAll(keys, replaceExisting, completionListener); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture containsKeyAsync(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.containsKeyAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean containsKeys(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.containsKeys(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture containsKeysAsync(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.containsKeysAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void put(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - 
CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.put(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture putAsync(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.putAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V getAndPut(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndPut(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture getAndPutAsync(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndPutAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void putAll(Map map) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.putAll(map); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture putAllAsync(Map map) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.putAllAsync(map); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean putIfAbsent(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = 
onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.putIfAbsent(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture putIfAbsentAsync(K key, V val) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.putIfAbsentAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean remove(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.remove(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture removeAsync(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.removeAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean remove(K key, V oldVal) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.remove(key, oldVal); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture removeAsync(K key, V oldVal) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.removeAsync(key, oldVal); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V getAndRemove(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + 
CacheOperationGate opGate = onEnter(); try { return delegate.getAndRemove(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture getAndRemoveAsync(K key) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndRemoveAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean replace(K key, V oldVal, V newVal) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.replace(key, oldVal, newVal); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture replaceAsync(K key, V oldVal, V newVal) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.replaceAsync(key, oldVal, newVal); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public boolean replace(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.replace(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture replaceAsync(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.replaceAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public V getAndReplace(K key, V val) throws TransactionException { - GridCacheGateway gate = gate(); - - 
CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndReplace(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture getAndReplaceAsync(K key, V val) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.getAndReplaceAsync(key, val); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void removeAll(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.removeAll(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture removeAllAsync(Set keys) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.removeAllAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void removeAll() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.removeAll(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture removeAllAsync() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.removeAllAsync(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void clear() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.clear(); } finally { - onLeave(gate, prev); + 
onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture clearAsync() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.clearAsync(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void clear(K key) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.clear(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture clearAsync(K key) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.clearAsync(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void clearAll(Set keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.clearAll(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture clearAllAsync(Set keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.clearAllAsync(keys); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void localClear(K key) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.localClear(key); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void localClearAll(Set keys) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.localClearAll(keys); } 
finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public T invoke(K key, EntryProcessor entryProcessor, Object... arguments) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invoke(key, entryProcessor, arguments); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture invokeAsync(K key, EntryProcessor entryProcessor, Object... arguments) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAsync(key, entryProcessor, arguments); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public T invoke(K key, CacheEntryProcessor entryProcessor, Object... arguments) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invoke(key, entryProcessor, arguments); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture invokeAsync(K key, CacheEntryProcessor entryProcessor, Object... arguments) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAsync(key, entryProcessor, arguments); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Map> invokeAll(Set keys, EntryProcessor entryProcessor, Object... 
args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAll(keys, entryProcessor, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture>> invokeAllAsync(Set keys, EntryProcessor entryProcessor, Object... args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAllAsync(keys, entryProcessor, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Map> invokeAll(Set keys, CacheEntryProcessor entryProcessor, Object... args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAll(keys, entryProcessor, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public IgniteFuture>> invokeAllAsync(Set keys, CacheEntryProcessor entryProcessor, Object... 
args) throws TransactionException { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.invokeAllAsync(keys, entryProcessor, args); } finally { - onLeave(gate, prev); + onLeave(opGate); } } @@ -1443,43 +1265,37 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public void registerCacheEntryListener(CacheEntryListenerConfiguration cacheEntryListenerConfiguration) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.registerCacheEntryListener(cacheEntryListenerConfiguration); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void deregisterCacheEntryListener(CacheEntryListenerConfiguration cacheEntryListenerConfiguration) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.deregisterCacheEntryListener(cacheEntryListenerConfiguration); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Iterator> iterator() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.iterator(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } @@ -1550,99 +1366,133 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { /** {@inheritDoc} */ @Override public CacheMetrics metrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.metrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public CacheMetrics metrics(ClusterGroup grp) { - GridCacheGateway gate = gate(); - - 
CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.metrics(grp); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public CacheMetrics localMetrics() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localMetrics(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public CacheMetricsMXBean mxBean() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.mxBean(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public CacheMetricsMXBean localMxBean() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.localMxBean(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public Collection lostPartitions() { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { return delegate.lostPartitions(); } finally { - onLeave(gate, prev); + onLeave(opGate); } } /** {@inheritDoc} */ @Override public void enableStatistics(boolean enabled) { - GridCacheGateway gate = gate(); - - CacheOperationContext prev = onEnter(gate, opCtx); + CacheOperationGate opGate = onEnter(); try { delegate.enableStatistics(enabled); } finally { - onLeave(gate, prev); + onLeave(opGate); + } + } + + /** {@inheritDoc} */ + @Override public void preloadPartition(int part) { + CacheOperationGate opGate = onEnter(); + + try { + delegate.preloadPartition(part); + } + finally { + onLeave(opGate); + } + } + + /** {@inheritDoc} */ + @Override public IgniteFuture preloadPartitionAsync(int part) { + CacheOperationGate 
opGate = onEnter(); + + try { + return delegate.preloadPartitionAsync(part); + } + finally { + onLeave(opGate); + } + } + + /** {@inheritDoc} */ + @Override public boolean localPreloadPartition(int part) { + CacheOperationGate opGate = onEnter(); + + try { + return delegate.localPreloadPartition(part); + } + finally { + onLeave(opGate); + } + } + + /** {@inheritDoc} */ + @Override public void clearStatistics() { + CacheOperationGate opGate = onEnter(); + + try { + delegate.clearStatistics(); + } + finally { + onLeave(opGate); } } @@ -1662,26 +1512,54 @@ public void setCacheManager(org.apache.ignite.cache.CacheManager cacheMgr) { * * @param gate Cache gateway. */ - private void checkProxyIsValid(@Nullable GridCacheGateway gate) { + private GridCacheGateway checkProxyIsValid(@Nullable GridCacheGateway gate, boolean tryRestart) { if (isProxyClosed()) throw new IllegalStateException("Cache has been closed: " + context().name()); - if (delegate instanceof IgniteCacheProxyImpl) + boolean isCacheProxy = delegate instanceof IgniteCacheProxyImpl; + + if (isCacheProxy) ((IgniteCacheProxyImpl) delegate).checkRestart(); if (gate == null) throw new IllegalStateException("Gateway is unavailable. Probably cache has been destroyed, but proxy is not closed."); + + if (isCacheProxy && tryRestart && gate.isStopped() && + context().kernalContext().gateway().getState() == GridKernalState.STARTED) { + IgniteCacheProxyImpl proxyImpl = (IgniteCacheProxyImpl) delegate; + + try { + IgniteInternalCache cache = context().kernalContext().cache().publicJCache(context().name()).internalProxy(); + + proxyImpl.opportunisticRestart(cache); + + return gate(); + } catch (IgniteCheckedException ice) { + // Opportunity didn't work out. + } + } + + return gate; } /** - * @param gate Cache gateway. - * @param opCtx Cache operation context to guard. * @return Previous projection set on this thread. 
*/ - private CacheOperationContext onEnter(@Nullable GridCacheGateway gate, CacheOperationContext opCtx) { - checkProxyIsValid(gate); + private CacheOperationGate onEnter() { + GridCacheGateway gate = checkProxyIsValid(gate(), true); + + try { + return new CacheOperationGate(gate, + lock ? gate.enter(opCtx) : gate.enterNoLock(opCtx)); + } + catch (IllegalStateException e) { + boolean isCacheProxy = delegate instanceof IgniteCacheProxyImpl; + + if (isCacheProxy) + ((IgniteCacheProxyImpl) delegate).checkRestart(true); - return lock ? gate.enter(opCtx) : gate.enterNoLock(opCtx); + throw e; // If we reached this line. + } } /** @@ -1690,7 +1568,7 @@ private CacheOperationContext onEnter(@Nullable GridCacheGateway gate, Cac */ private boolean onEnterIfNoStop(@Nullable GridCacheGateway gate) { try { - checkProxyIsValid(gate); + checkProxyIsValid(gate, false); } catch (Exception e) { return false; @@ -1700,14 +1578,13 @@ private boolean onEnterIfNoStop(@Nullable GridCacheGateway gate) { } /** - * @param gate Cache gateway. - * @param opCtx Operation context to guard. + * @param opGate Operation context to guard. */ - private void onLeave(GridCacheGateway gate, CacheOperationContext opCtx) { + private void onLeave(CacheOperationGate opGate) { if (lock) - gate.leave(opCtx); + opGate.gate.leave(opGate.prev); else - gate.leaveNoLock(opCtx); + opGate.gate.leaveNoLock(opGate.prev); } /** @@ -1774,4 +1651,28 @@ private void onLeave(GridCacheGateway gate) { @Override public int hashCode() { return delegate.hashCode(); } + + /** + * Holder for gate being entered and operation context to restore. + */ + private class CacheOperationGate { + /** + * Gate being entered in this operation. + */ + public final GridCacheGateway gate; + + /** + * Operation context to restore after current operation completes. + */ + public final CacheOperationContext prev; + + /** + * @param gate Gate being entered in this operation. 
+ * @param prev Operation context to restore after current operation completes. + */ + public CacheOperationGate(GridCacheGateway gate, CacheOperationContext prev) { + this.gate = gate; + this.prev = prev; + } + } } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java index 55357ffb1834f..ff869ab076775 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAdapter.java @@ -73,6 +73,7 @@ import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.ComputeTaskInternalFuture; import org.apache.ignite.internal.IgniteEx; +import org.apache.ignite.internal.IgniteFeatures; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.IgniteInterruptedCheckedException; import org.apache.ignite.internal.IgniteKernal; @@ -82,12 +83,15 @@ import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; import org.apache.ignite.internal.cluster.ClusterTopologyServerNotFoundException; import org.apache.ignite.internal.cluster.IgniteClusterEx; +import org.apache.ignite.internal.managers.discovery.IgniteClusterNode; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.affinity.GridCacheAffinityImpl; import org.apache.ignite.internal.processors.cache.distributed.IgniteExternalizableExpiryPolicy; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtInvalidPartitionException; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; +import 
org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxLocalAdapter; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtInvalidPartitionException; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTxLocal; import org.apache.ignite.internal.processors.cache.dr.GridCacheDrInfo; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; @@ -103,11 +107,11 @@ import org.apache.ignite.internal.processors.task.GridInternal; import org.apache.ignite.internal.transactions.IgniteTxHeuristicCheckedException; import org.apache.ignite.internal.transactions.IgniteTxRollbackCheckedException; +import org.apache.ignite.internal.transactions.IgniteTxTimeoutCheckedException; import org.apache.ignite.internal.util.future.GridEmbeddedFuture; import org.apache.ignite.internal.util.future.GridFinishedFuture; import org.apache.ignite.internal.util.future.GridFutureAdapter; import org.apache.ignite.internal.util.lang.GridCloseableIterator; -import org.apache.ignite.internal.util.lang.GridClosureException; import org.apache.ignite.internal.util.tostring.GridToStringExclude; import org.apache.ignite.internal.util.typedef.C1; import org.apache.ignite.internal.util.typedef.C2; @@ -121,7 +125,6 @@ import org.apache.ignite.internal.util.typedef.X; import org.apache.ignite.internal.util.typedef.internal.A; import org.apache.ignite.internal.util.typedef.internal.CU; -import org.apache.ignite.internal.util.typedef.internal.GPC; import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiPredicate; @@ -131,11 +134,13 @@ import org.apache.ignite.lang.IgniteInClosure; import org.apache.ignite.lang.IgniteOutClosure; 
import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.lang.IgniteProductVersion; +import org.apache.ignite.lang.IgniteRunnable; import org.apache.ignite.mxbean.CacheMetricsMXBean; import org.apache.ignite.plugin.security.SecurityPermission; import org.apache.ignite.resources.IgniteInstanceResource; import org.apache.ignite.resources.JobContextResource; -import org.apache.ignite.spi.discovery.tcp.internal.TcpDiscoveryNode; +import org.apache.ignite.resources.LoggerResource; import org.apache.ignite.transactions.Transaction; import org.apache.ignite.transactions.TransactionConcurrency; import org.apache.ignite.transactions.TransactionIsolation; @@ -144,6 +149,7 @@ import static org.apache.ignite.IgniteSystemProperties.IGNITE_CACHE_KEY_VALIDATION_DISABLED; import static org.apache.ignite.IgniteSystemProperties.IGNITE_CACHE_RETRIES_COUNT; import static org.apache.ignite.internal.GridClosureCallMode.BROADCAST; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_LOAD; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; import static org.apache.ignite.internal.processors.task.GridTaskThreadContextKey.TC_NO_FAILOVER; @@ -173,13 +179,12 @@ public abstract class GridCacheAdapter implements IgniteInternalCache> stash = new ThreadLocal>() { - @Override protected IgniteBiTuple initialValue() { - return new IgniteBiTuple<>(); - } - }; + private static final ThreadLocal> stash = ThreadLocal.withInitial( + () -> new IgniteBiTuple<>()); /** {@link GridCacheReturn}-to-value conversion. 
*/ private static final IgniteClosure RET2VAL = @@ -225,11 +230,7 @@ public abstract class GridCacheAdapter implements IgniteInternalCache lastFut = new ThreadLocal() { - @Override protected FutureHolder initialValue() { - return new FutureHolder(); - } - }; + protected ThreadLocal lastFut = ThreadLocal.withInitial(() -> new FutureHolder()); /** Cache configuration. */ @GridToStringExclude @@ -286,6 +287,9 @@ public abstract class GridCacheAdapter implements IgniteInternalCache executeClearTask(@Nullable Set keys return new GridFinishedFuture<>(); } + /** + * @param part Partition id. + * @return Future. + */ + private IgniteInternalFuture executePreloadTask(int part) throws IgniteCheckedException { + ClusterGroup grp = ctx.grid().cluster().forDataNodes(ctx.name()); + + @Nullable ClusterNode targetNode = ctx.affinity().primaryByPartition(part, ctx.topology().readyTopologyVersion()); + + if (targetNode == null || targetNode.version().compareTo(PRELOAD_PARTITION_SINCE) < 0) { + if (!partPreloadBadVerWarned) { + U.warn(log(), "Attempting to execute partition preloading task on outdated or not mapped node " + + "[targetNodeVer=" + (targetNode == null ? "NA" : targetNode.version()) + + ", minSupportedNodeVer=" + PRELOAD_PARTITION_SINCE + ']'); + + partPreloadBadVerWarned = true; + } + + return new GridFinishedFuture<>(); + } + + return ctx.closures().affinityRun(Collections.singleton(name()), part, + new PartitionPreloadJob(ctx.name(), part), grp.nodes(), null); + } + /** * @param keys Keys. * @param readers Readers flag. 
@@ -1271,20 +1299,7 @@ private boolean evictx(K key, GridCacheVersion ver, /** {@inheritDoc} */ @Override public final V getForcePrimary(K key) throws IgniteCheckedException { - String taskName = ctx.kernalContext().job().currentTaskName(); - - CacheOperationContext opCtx = ctx.operationContextPerCall(); - - return getAllAsync( - F.asList(key), - /*force primary*/true, - /*skip tx*/false, - /*subject id*/null, - taskName, - /*deserialize cache objects*/true, - opCtx != null && opCtx.recovery(), - /*skip values*/false, - /*need ver*/false).get().get(key); + return getForcePrimaryAsync(key).get(); } /** {@inheritDoc} */ @@ -1297,7 +1312,7 @@ private boolean evictx(K key, GridCacheVersion ver, Collections.singletonList(key), /*force primary*/true, /*skip tx*/false, - null, + /*subject id*/null, taskName, true, opCtx != null && opCtx.recovery(), @@ -1321,7 +1336,7 @@ private boolean evictx(K key, GridCacheVersion ver, CacheOperationContext opCtx = ctx.operationContextPerCall(); return getAllAsync(keys, - !ctx.config().isReadFromBackup(), + false, /*skip tx*/true, null, taskName, @@ -1406,7 +1421,26 @@ private boolean evictx(K key, GridCacheVersion ver, final K key0 = keepBinary ? 
(K)ctx.toCacheKeyObject(key) : key; - IgniteInternalFuture fut = getAsync(key, !keepBinary, false); + IgniteInternalFuture fut = null; + + try { + checkJta(); + } + catch (IgniteCheckedException e) { + fut = new GridFinishedFuture<>(e); + } + + if (fut == null) { + String taskName = ctx.kernalContext().job().currentTaskName(); + + fut = getAsync(key, + /*skip tx*/false, + null, + taskName, + !keepBinary, + /*skip vals*/false, + false); + } if (ctx.config().getInterceptor() != null) fut = fut.chain(new CX1, V>() { @@ -1418,7 +1452,7 @@ private boolean evictx(K key, GridCacheVersion ver, }); if (statsEnabled) - fut.listen(new UpdateGetTimeStatClosure(metrics0(), start)); + fut.listen(new UpdateGetTimeStatClosure<>(metrics0(), start)); return fut; } @@ -1435,8 +1469,27 @@ private boolean evictx(K key, GridCacheVersion ver, final K key0 = keepBinary ? (K)ctx.toCacheKeyObject(key) : key; - IgniteInternalFuture fut = - (IgniteInternalFuture)getAsync(key0, !keepBinary, true); + IgniteInternalFuture fut = null; + + try { + checkJta(); + } + catch (IgniteCheckedException e) { + fut = new GridFinishedFuture<>(e); + } + + if (fut == null) { + + String taskName = ctx.kernalContext().job().currentTaskName(); + + fut = (IgniteInternalFuture)getAsync(key0, + /*skip tx*/false, + null, + taskName, + !keepBinary, + /*skip vals*/false, + true); + } final boolean intercept = ctx.config().getInterceptor() != null; @@ -1465,7 +1518,7 @@ private boolean evictx(K key, GridCacheVersion ver, }); if (statsEnabled) - fut.listen(new UpdateGetTimeStatClosure(metrics0(), start)); + fut.listen(new UpdateGetTimeStatClosure<>(metrics0(), start)); return fr; } @@ -1506,7 +1559,7 @@ private boolean evictx(K key, GridCacheVersion ver, res = interceptGetEntries(keys, map); else for (Map.Entry e : map.entrySet()) - res.add(new CacheEntryImplEx<>(e.getKey(), (V)e.getValue().value(), e.getValue().version())); + res.add(new CacheEntryImplEx<>(e.getKey(), e.getValue().value(), e.getValue().version())); if 
(statsEnabled) metrics0().addGetTimeNanos(System.nanoTime() - start); @@ -1528,7 +1581,7 @@ private boolean evictx(K key, GridCacheVersion ver, IgniteInternalFuture> fut = getAllAsync( keys, - !ctx.config().isReadFromBackup(), + false, /*skip tx*/false, opCtx != null ? opCtx.subjectId() : null, taskName, @@ -1545,7 +1598,7 @@ private boolean evictx(K key, GridCacheVersion ver, }); if (statsEnabled) - fut.listen(new UpdateGetTimeStatClosure>(metrics0(), start)); + fut.listen(new UpdateGetTimeStatClosure<>(metrics0(), start)); return fut; } @@ -1566,7 +1619,7 @@ private boolean evictx(K key, GridCacheVersion ver, IgniteInternalFuture> fut = (IgniteInternalFuture>)((IgniteInternalFuture)getAllAsync( keys, - !ctx.config().isReadFromBackup(), + false, /*skip tx*/false, opCtx != null ? opCtx.subjectId() : null, taskName, @@ -1588,7 +1641,7 @@ private boolean evictx(K key, GridCacheVersion ver, for (Map.Entry e : f.get().entrySet()) res.put(e.getKey(), - new CacheEntryImplEx<>(e.getKey(), (V)e.getValue().value(), e.getValue().version())); + new CacheEntryImplEx<>(e.getKey(), e.getValue().value(), e.getValue().version())); return res.values(); } @@ -1596,7 +1649,7 @@ private boolean evictx(K key, GridCacheVersion ver, }); if (statsEnabled) - fut.listen(new UpdateGetTimeStatClosure>(metrics0(), start)); + fut.listen(new UpdateGetTimeStatClosure<>(metrics0(), start)); return rf; } @@ -1665,7 +1718,7 @@ private Collection> interceptGetEntries( assert interceptor != null; for (Map.Entry e : map.entrySet()) { - V val = interceptor.onGet(e.getKey(), (V)e.getValue().value()); + V val = interceptor.onGet(e.getKey(), e.getValue().value()); if (val != null) res.put(e.getKey(), new CacheEntryImplEx<>(e.getKey(), val, e.getValue().version())); @@ -1689,7 +1742,6 @@ private Collection> interceptGetEntries( /** * @param key Key. - * @param forcePrimary Force primary. * @param skipTx Skip tx. * @param subjId Subj Id. * @param taskName Task name. 
@@ -1700,7 +1752,6 @@ private Collection> interceptGetEntries( */ protected IgniteInternalFuture getAsync( final K key, - boolean forcePrimary, boolean skipTx, @Nullable UUID subjId, String taskName, @@ -1711,7 +1762,7 @@ protected IgniteInternalFuture getAsync( CacheOperationContext opCtx = ctx.operationContextPerCall(); return getAllAsync(Collections.singletonList(key), - forcePrimary, + false, skipTx, subjId, taskName, @@ -1749,7 +1800,7 @@ protected IgniteInternalFuture getAsync( * @return Future for the get operation. * @see GridCacheAdapter#getAllAsync(Collection) */ - protected IgniteInternalFuture> getAllAsync( + protected abstract IgniteInternalFuture> getAllAsync( @Nullable Collection keys, boolean forcePrimary, boolean skipTx, @@ -1759,454 +1810,14 @@ protected IgniteInternalFuture> getAllAsync( boolean recovery, boolean skipVals, final boolean needVer - ) { - CacheOperationContext opCtx = ctx.operationContextPerCall(); - - subjId = ctx.subjectIdPerCall(subjId, opCtx); - - return getAllAsync(keys, - null, - opCtx == null || !opCtx.skipStore(), - !skipTx, - subjId, - taskName, - deserializeBinary, - opCtx != null && opCtx.recovery(), - forcePrimary, - skipVals ? null : expiryPolicy(opCtx != null ? opCtx.expiry() : null), - skipVals, - needVer); - } + ); /** - * @param keys Keys. - * @param readerArgs Near cache reader will be added if not null. - * @param readThrough Read through. - * @param checkTx Check tx. - * @param subjId Subj Id. - * @param taskName Task name. - * @param deserializeBinary Deserialize binary. - * @param recovery Recovery flag. - * @param forcePrimary Froce primary. - * @param expiry Expiry policy. - * @param skipVals Skip values. - * @param needVer Need version. - * @return Future for the get operation. 
- * @see GridCacheAdapter#getAllAsync(Collection) - */ - public final IgniteInternalFuture> getAllAsync(@Nullable final Collection keys, - @Nullable final ReaderArguments readerArgs, - boolean readThrough, - boolean checkTx, - @Nullable final UUID subjId, - final String taskName, - final boolean deserializeBinary, - final boolean recovery, - final boolean forcePrimary, - @Nullable IgniteCacheExpiryPolicy expiry, - final boolean skipVals, - final boolean needVer - ) { - ctx.checkSecurity(SecurityPermission.CACHE_READ); - - if (keyCheck) - validateCacheKeys(keys); - - return getAllAsync0(ctx.cacheKeysView(keys), - readerArgs, - readThrough, - checkTx, - subjId, - taskName, - deserializeBinary, - expiry, - skipVals, - /*keep cache objects*/false, - recovery, - needVer); - } - - /** - * @param keys Keys. - * @param readerArgs Near cache reader will be added if not null. - * @param readThrough Read-through flag. - * @param checkTx Check local transaction flag. - * @param subjId Subject ID. - * @param taskName Task name/ - * @param deserializeBinary Deserialize binary flag. - * @param expiry Expiry policy. - * @param skipVals Skip values flag. - * @param keepCacheObjects Keep cache objects. - * @param needVer If {@code true} returns values as tuples containing value and version. - * @return Future. 
- */ - protected final IgniteInternalFuture> getAllAsync0( - @Nullable final Collection keys, - @Nullable final ReaderArguments readerArgs, - final boolean readThrough, - boolean checkTx, - @Nullable final UUID subjId, - final String taskName, - final boolean deserializeBinary, - @Nullable final IgniteCacheExpiryPolicy expiry, - final boolean skipVals, - final boolean keepCacheObjects, - final boolean recovery, - final boolean needVer - ) { - if (F.isEmpty(keys)) - return new GridFinishedFuture<>(Collections.emptyMap()); - - GridNearTxLocal tx = null; - - if (checkTx) { - try { - checkJta(); - } - catch (IgniteCheckedException e) { - return new GridFinishedFuture<>(e); - } - - tx = ctx.tm().threadLocalTx(ctx); - } - - if (tx == null || tx.implicit()) { - Map misses = null; - - Set newLocalEntries = null; - - final AffinityTopologyVersion topVer = tx == null ? ctx.affinity().affinityTopologyVersion() : - tx.topologyVersion(); - - try { - int keysSize = keys.size(); - - GridDhtTopologyFuture topFut = ctx.shared().exchange().lastFinishedFuture(); - - Throwable ex = topFut != null ? topFut.validateCache(ctx, recovery, /*read*/true, null, keys) : null; - - if (ex != null) - return new GridFinishedFuture<>(ex); - - final Map map = keysSize == 1 ? 
- (Map)new IgniteBiTuple<>() : - U.newHashMap(keysSize); - - final boolean storeEnabled = !skipVals && readThrough && ctx.readThrough(); - - boolean readNoEntry = ctx.readNoEntry(expiry, readerArgs != null); - - for (KeyCacheObject key : keys) { - while (true) { - try { - EntryGetResult res = null; - - boolean evt = !skipVals; - boolean updateMetrics = !skipVals; - - GridCacheEntryEx entry = null; - - boolean skipEntry = readNoEntry; - - if (readNoEntry) { - CacheDataRow row = ctx.offheap().read(ctx, key); - - if (row != null) { - long expireTime = row.expireTime(); - - if (expireTime != 0) { - if (expireTime > U.currentTimeMillis()) { - res = new EntryGetWithTtlResult(row.value(), - row.version(), - false, - expireTime, - 0); - } - else - skipEntry = false; - } - else - res = new EntryGetResult(row.value(), row.version(), false); - } - - if (res != null) { - if (evt) { - ctx.events().readEvent(key, - null, - row.value(), - subjId, - taskName, - !deserializeBinary); - } - - if (updateMetrics && ctx.statisticsEnabled()) - ctx.cache().metrics0().onRead(true); - } - else if (storeEnabled) - skipEntry = false; - } - - if (!skipEntry) { - boolean isNewLocalEntry = this.map.getEntry(ctx, key) == null; - - entry = entryEx(key); - - if (entry == null) { - if (!skipVals && ctx.statisticsEnabled()) - ctx.cache().metrics0().onRead(false); - - break; - } - - if (isNewLocalEntry) { - if (newLocalEntries == null) - newLocalEntries = new HashSet<>(); - - newLocalEntries.add(entry); - } - - if (storeEnabled) { - res = entry.innerGetAndReserveForLoad(updateMetrics, - evt, - subjId, - taskName, - expiry, - !deserializeBinary, - readerArgs); - - assert res != null; - - if (res.value() == null) { - if (misses == null) - misses = new HashMap<>(); - - misses.put(key, res); - - res = null; - } - } - else { - res = entry.innerGetVersioned( - null, - null, - updateMetrics, - evt, - subjId, - null, - taskName, - expiry, - !deserializeBinary, - readerArgs); - - if (res == null) - 
ctx.evicts().touch(entry, topVer); - } - } - - if (res != null) { - ctx.addResult(map, - key, - res, - skipVals, - keepCacheObjects, - deserializeBinary, - true, - needVer); - - if (entry != null && (tx == null || (!tx.implicit() && tx.isolation() == READ_COMMITTED))) - ctx.evicts().touch(entry, topVer); - - if (keysSize == 1) - // Safe to return because no locks are required in READ_COMMITTED mode. - return new GridFinishedFuture<>(map); - } - - break; - } - catch (GridCacheEntryRemovedException ignored) { - if (log.isDebugEnabled()) - log.debug("Got removed entry in getAllAsync(..) method (will retry): " + key); - } - } - } - - if (storeEnabled && misses != null) { - final Map loadKeys = misses; - - final IgniteTxLocalAdapter tx0 = tx; - - final Collection loaded = new HashSet<>(); - - return new GridEmbeddedFuture( - ctx.closures().callLocalSafe(ctx.projectSafe(new GPC>() { - @Override public Map call() throws Exception { - ctx.store().loadAll(null/*tx*/, loadKeys.keySet(), new CI2() { - @Override public void apply(KeyCacheObject key, Object val) { - EntryGetResult res = loadKeys.get(key); - - if (res == null || val == null) - return; - - loaded.add(key); - - CacheObject cacheVal = ctx.toCacheObject(val); - - while (true) { - GridCacheEntryEx entry = null; - - try { - ctx.shared().database().ensureFreeSpace(ctx.dataRegion()); - } - catch (IgniteCheckedException e) { - // Wrap errors (will be unwrapped). - throw new GridClosureException(e); - } - - ctx.shared().database().checkpointReadLock(); - - try { - entry = entryEx(key); - - entry.unswap(); - - EntryGetResult verVal = entry.versionedValue( - cacheVal, - res.version(), - null, - expiry, - readerArgs); - - if (log.isDebugEnabled()) - log.debug("Set value loaded from store into entry [" + - "oldVer=" + res.version() + - ", newVer=" + verVal.version() + ", " + - "entry=" + entry + ']'); - - // Don't put key-value pair into result map if value is null. 
- if (verVal.value() != null) { - ctx.addResult(map, - key, - verVal, - skipVals, - keepCacheObjects, - deserializeBinary, - true, - needVer); - } - - if (tx0 == null || (!tx0.implicit() && - tx0.isolation() == READ_COMMITTED)) - ctx.evicts().touch(entry, topVer); - - break; - } - catch (GridCacheEntryRemovedException ignore) { - if (log.isDebugEnabled()) - log.debug("Got removed entry during getAllAsync (will retry): " + - entry); - } - catch (IgniteCheckedException e) { - // Wrap errors (will be unwrapped). - throw new GridClosureException(e); - } - finally { - ctx.shared().database().checkpointReadUnlock(); - } - } - } - }); - - clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0); - - return map; - } - }), true), - new C2, Exception, IgniteInternalFuture>>() { - @Override public IgniteInternalFuture> apply(Map map, Exception e) { - if (e != null) { - clearReservationsIfNeeded(topVer, loadKeys, loaded, tx0); - - return new GridFinishedFuture<>(e); - } - - if (tx0 == null || (!tx0.implicit() && tx0.isolation() == READ_COMMITTED)) { - Collection notFound = new HashSet<>(loadKeys.keySet()); - - notFound.removeAll(loaded); - - // Touch entries that were not found in store. - for (KeyCacheObject key : notFound) { - GridCacheEntryEx entry = peekEx(key); - - if (entry != null) - ctx.evicts().touch(entry, topVer); - } - } - - // There were no misses. - return new GridFinishedFuture<>(Collections.emptyMap()); - } - }, - new C2, Exception, Map>() { - @Override public Map apply(Map loaded, Exception e) { - if (e == null) - map.putAll(loaded); - - return map; - } - } - ); - } - else - // Misses can be non-zero only if store is enabled. 
- assert misses == null; - - return new GridFinishedFuture<>(map); - } - catch (RuntimeException | AssertionError e) { - if (misses != null) { - for (KeyCacheObject key0 : misses.keySet()) - ctx.evicts().touch(peekEx(key0), topVer); - } - - if (newLocalEntries != null) { - for (GridCacheEntryEx entry : newLocalEntries) - removeEntry(entry); - } - - return new GridFinishedFuture<>(e); - } - catch (IgniteCheckedException e) { - return new GridFinishedFuture<>(e); - } - } - else { - return asyncOp(tx, new AsyncOp>(keys) { - @Override public IgniteInternalFuture> op(GridNearTxLocal tx, - AffinityTopologyVersion readyTopVer) { - return tx.getAllAsync(ctx, - readyTopVer, - keys, - deserializeBinary, - skipVals, - false, - !readThrough, - recovery, - needVer); - } - }, ctx.operationContextPerCall(), /*retry*/false); - } - } - - /** - * @param topVer Affinity topology version for which load was performed. * @param loadKeys Keys to load. * @param loaded Actually loaded keys. * @param tx0 Transaction within which the load was run, if any. */ - private void clearReservationsIfNeeded( - AffinityTopologyVersion topVer, + protected void clearReservationsIfNeeded( Map loadKeys, Collection loaded, IgniteTxLocalAdapter tx0 @@ -2227,7 +1838,7 @@ private void clearReservationsIfNeeded( entry.clearReserveForLoad(e.getValue().version()); if (needTouch) - ctx.evicts().touch(entry, topVer); + entry.touch(); } } } @@ -2426,7 +2037,7 @@ protected boolean put0(final K key, final V val, final CacheEntryPredicate filte @Override public IgniteInternalFuture putAllConflictAsync(final Map drMap) throws IgniteCheckedException { if (F.isEmpty(drMap)) - return new GridFinishedFuture(); + return new GridFinishedFuture<>(); ctx.dr().onReceiveCacheEntriesReceived(drMap.size()); @@ -2617,7 +2228,7 @@ private EntryProcessorResult invoke0( assert ret != null; - return ret.value() != null ? ret.>>value() : Collections.>emptyMap(); + return ret.value() != null ? 
ret.value() : Collections.emptyMap(); } }); } @@ -2655,7 +2266,7 @@ private EntryProcessorResult invoke0( assert ret != null; - return ret.value() != null ? ret.>>value() : Collections.>emptyMap(); + return ret.value() != null ? ret.value() : Collections.emptyMap(); } }); } @@ -2831,7 +2442,7 @@ protected void putAll0(final Map m) throws IgniteCheck /** {@inheritDoc} */ @Override public IgniteInternalFuture putAllAsync(final Map m) { if (F.isEmpty(m)) - return new GridFinishedFuture(); + return new GridFinishedFuture<>(); if (keyCheck) validateCacheKeys(m.keySet()); @@ -2890,12 +2501,14 @@ protected V getAndRemove0(final K key) throws IgniteCheckedException { @Override public V op(GridNearTxLocal tx) throws IgniteCheckedException { K key0 = keepBinary ? (K)ctx.toCacheKeyObject(key) : key; - V ret = tx.removeAllAsync(ctx, - null, - Collections.singletonList(key0), - /*retval*/true, - null, - /*singleRmv*/false).get().value(); + IgniteInternalFuture fut = tx.removeAllAsync(ctx, + null, + Collections.singletonList(key0), + /*retval*/true, + null, + /*singleRmv*/false); + + V ret = fut.get().value(); if (ctx.config().getInterceptor() != null) { K key = keepBinary ? 
(K)ctx.unwrapBinaryIfNeeded(key0, true, false) : key0; @@ -2927,7 +2540,7 @@ protected V getAndRemove0(final K key) throws IgniteCheckedException { IgniteInternalFuture fut = getAndRemoveAsync0(key); if (statsEnabled) - fut.listen(new UpdateRemoveTimeStatClosure(metrics0(), start)); + fut.listen(new UpdateRemoveTimeStatClosure<>(metrics0(), start)); return fut; } @@ -3021,7 +2634,7 @@ protected void removeAll0(final Collection keys) throws IgniteCheck /** {@inheritDoc} */ @Override public IgniteInternalFuture removeAllAsync(@Nullable final Collection keys) { if (F.isEmpty(keys)) - return new GridFinishedFuture(); + return new GridFinishedFuture<>(); final boolean statsEnabled = ctx.statisticsEnabled(); @@ -3142,7 +2755,7 @@ public IgniteInternalFuture removeAsync(final K key, @Nullable final Ca IgniteInternalFuture fut = removeAsync0(key, filter); if (statsEnabled) - fut.listen(new UpdateRemoveTimeStatClosure(metrics0(), start)); + fut.listen(new UpdateRemoveTimeStatClosure<>(metrics0(), start)); return fut; } @@ -3195,7 +2808,7 @@ protected IgniteInternalFuture removeAsync0(final K key, @Nullable fina @Override public IgniteInternalFuture removeAllConflictAsync(final Map drMap) throws IgniteCheckedException { if (F.isEmpty(drMap)) - return new GridFinishedFuture(); + return new GridFinishedFuture<>(); ctx.dr().onReceiveCacheEntriesReceived(drMap.size()); @@ -3234,7 +2847,7 @@ protected IgniteInternalFuture removeAsync0(final K key, @Nullable fina List metrics = new ArrayList<>(grp.nodes().size()); for (ClusterNode node : grp.nodes()) { - Map nodeCacheMetrics = ((TcpDiscoveryNode)node).cacheMetrics(); + Map nodeCacheMetrics = ((IgniteClusterNode)node).cacheMetrics(); if (nodeCacheMetrics != null) { CacheMetrics e = nodeCacheMetrics.get(context().cacheId()); @@ -3244,12 +2857,24 @@ protected IgniteInternalFuture removeAsync0(final K key, @Nullable fina } } - return new CacheMetricsSnapshot(ctx.cache().localMetrics(), metrics); + + return isCacheMetricsV2Supported() 
? new CacheMetricsSnapshotV2(ctx.cache().localMetrics(), metrics) : + new CacheMetricsSnapshot(ctx.cache().localMetrics(), metrics); } /** {@inheritDoc} */ @Override public CacheMetrics localMetrics() { - return new CacheMetricsSnapshot(metrics); + return isCacheMetricsV2Supported() ? new CacheMetricsSnapshotV2(metrics) : + new CacheMetricsSnapshot(metrics); + } + + /** + * @return checks cluster server nodes version is compatible with Cache Metrics V2 + */ + private boolean isCacheMetricsV2Supported() { + Collection nodes = ctx.discovery().allNodes(); + + return IgniteFeatures.allNodesSupports(nodes, IgniteFeatures.CACHE_METRICS_V2); } /** {@inheritDoc} */ @@ -3528,7 +3153,7 @@ private void loadEntry(KeyCacheObject key, log.debug("Got removed entry during loadCache (will ignore): " + entry); } finally { - ctx.evicts().touch(entry, topVer); + entry.touch(); } CU.unwindEvicts(ctx); @@ -3940,7 +3565,6 @@ private Iterator> igniteIterator(boolean keepBinary, final CacheOperationContext opCtx = ctx.operationContextPerCall(); final GridCloseableIterator> iter = ctx0.queries().createScanQuery(p, null, keepBinary) - .keepAll(false) .executeScanQuery(); return ctx.itHolder().iterator(iter, new CacheIteratorConverter, Map.Entry>() { @@ -4079,7 +3703,8 @@ public void awaitLastFut() { READ_COMMITTED, tCfg.getDefaultTxTimeout(), !ctx.skipStore(), - 0 + 0, + null ); assert tx != null; @@ -4103,7 +3728,8 @@ public void awaitLastFut() { tx.xid(), e); } catch (IgniteCheckedException | AssertionError | RuntimeException e1) { - U.error(log, "Failed to rollback transaction (cache may contain stale locks): " + tx, e1); + U.error(log, "Failed to rollback transaction (cache may contain stale locks): " + + CU.txString(tx), e1); if (e != e1) e.addSuppressed(e1); @@ -4118,7 +3744,10 @@ public void awaitLastFut() { assert topVer != null && topVer.topologyVersion() > 0 : tx; - ctx.affinity().affinityReadyFuture(topVer.topologyVersion() + 1).get(); + AffinityTopologyVersion awaitVer = new 
AffinityTopologyVersion( + topVer.topologyVersion() + 1, 0); + + ctx.shared().exchange().affinityReadyFuture(awaitVer).get(); continue; } @@ -4126,6 +3755,16 @@ public void awaitLastFut() { throw e; } + catch (RuntimeException e) { + try { + tx.rollback(); + } + catch (IgniteCheckedException | AssertionError | RuntimeException e1) { + U.error(log, "Failed to rollback transaction " + CU.txString(tx), e1); + } + + throw e; + } finally { ctx.tm().resetContext(); @@ -4178,7 +3817,8 @@ private IgniteInternalFuture asyncOp(final AsyncOp op) { READ_COMMITTED, txCfg.getDefaultTxTimeout(), !skipStore, - 0); + 0, + null); return asyncOp(tx, op, opCtx, /*retry*/false); } @@ -4222,6 +3862,31 @@ protected IgniteInternalFuture asyncOp( final GridNearTxLocal tx0 = tx; + final CX1 clo = new CX1, T>() { + @Override public T applyx(IgniteInternalFuture tFut) throws IgniteCheckedException { + try { + return tFut.get(); + } + catch (IgniteTxTimeoutCheckedException | IgniteTxRollbackCheckedException | NodeStoppingException e) { + throw e; + } + catch (IgniteCheckedException e1) { + try { + tx0.rollbackNearTxLocalAsync(); + } + catch (Throwable e2) { + if (e1 != e2) + e1.addSuppressed(e2); + } + + throw e1; + } + finally { + ctx.shared().txContextReset(); + } + } + }; + if (fut != null && !fut.isDone()) { IgniteInternalFuture f = new GridEmbeddedFuture(fut, new IgniteOutClosure() { @@ -4231,31 +3896,7 @@ protected IgniteInternalFuture asyncOp( new IgniteCheckedException("Operation has been cancelled (node is stopping).")); try { - return op.op(tx0, opCtx).chain(new CX1, T>() { - @Override - public T applyx(IgniteInternalFuture tFut) throws IgniteCheckedException { - try { - return tFut.get(); - } - catch (IgniteTxRollbackCheckedException | NodeStoppingException e) { - throw e; - } - catch (IgniteCheckedException e1) { - try { - tx0.rollbackNearTxLocalAsync(); - } - catch (Throwable e2) { - if (e1 != e2) - e1.addSuppressed(e2); - } - - throw e1; - } - finally { - 
ctx.shared().txContextReset(); - } - } - }); + return op.op(tx0, opCtx).chain(clo); } finally { // It is necessary to clear tx context in this thread as well. @@ -4269,33 +3910,17 @@ public T applyx(IgniteInternalFuture tFut) throws IgniteCheckedException { return f; } + /** + * Wait for concurrent tx operation to finish. + * See {@link GridDhtTxLocalAdapter#updateLockFuture(IgniteInternalFuture, IgniteInternalFuture)} + */ + if (!tx0.txState().implicitSingle()) + tx0.txState().awaitLastFuture(ctx.shared()); + IgniteInternalFuture f; try { - f = op.op(tx, opCtx).chain(new CX1, T>() { - @Override public T applyx(IgniteInternalFuture tFut) throws IgniteCheckedException { - try { - return tFut.get(); - } - catch (IgniteTxRollbackCheckedException | NodeStoppingException e) { - throw e; - } - catch (IgniteCheckedException e1) { - try { - tx0.rollbackNearTxLocalAsync(); - } - catch (Throwable e2) { - if (e2 != e1) - e1.addSuppressed(e2); - } - - throw e1; - } - finally { - ctx.shared().txContextReset(); - } - } - }); + f = op.op(tx, opCtx).chain(clo); } finally { // It is necessary to clear tx context in this thread as well. @@ -4563,7 +4188,6 @@ protected V get0( try { return getAsync(key, - !ctx.config().isReadFromBackup(), /*skip tx*/false, null, taskName, @@ -4579,32 +4203,6 @@ protected V get0( } } - /** - * @param key Key. - * @param deserializeBinary Deserialize binary flag. - * @param needVer Need version. - * @return Read operation future. - */ - public final IgniteInternalFuture getAsync(final K key, boolean deserializeBinary, final boolean needVer) { - try { - checkJta(); - } - catch (IgniteCheckedException e) { - return new GridFinishedFuture<>(e); - } - - String taskName = ctx.kernalContext().job().currentTaskName(); - - return getAsync(key, - !ctx.config().isReadFromBackup(), - /*skip tx*/false, - null, - taskName, - deserializeBinary, - /*skip vals*/false, - needVer); - } - /** * @param keys Keys. * @param deserializeBinary Deserialize binary flag. 
@@ -4621,7 +4219,7 @@ protected Map getAll0(Collection keys, boolean deserializeBin CacheOperationContext opCtx = ctx.operationContextPerCall(); return getAllAsync(keys, - !ctx.config().isReadFromBackup(), + false, /*skip tx*/false, /*subject id*/null, taskName, @@ -4631,31 +4229,6 @@ protected Map getAll0(Collection keys, boolean deserializeBin needVer).get(); } - /** - * @param keys Keys. - * @param deserializeBinary Deserialize binary flag. - * @param needVer Need version. - * @return Read future. - */ - public IgniteInternalFuture> getAllAsync( - @Nullable Collection keys, - boolean deserializeBinary, - boolean recovery, - boolean needVer - ) { - String taskName = ctx.kernalContext().job().currentTaskName(); - - return getAllAsync(keys, - !ctx.config().isReadFromBackup(), - /*skip tx*/false, - /*subject id*/null, - taskName, - deserializeBinary, - recovery, - /*skip vals*/false, - needVer); - } - /** * @param entry Entry. * @param ver Version. @@ -4809,6 +4382,55 @@ private void advance() { return new CacheEntryImpl<>((K)key0, (V)val0, entry.version()); } + /** {@inheritDoc} */ + @Override public void preloadPartition(int part) throws IgniteCheckedException { + if (isLocal()) + ctx.offheap().preloadPartition(part); + else + executePreloadTask(part).get(); + } + + /** {@inheritDoc} */ + @Override public IgniteInternalFuture preloadPartitionAsync(int part) throws IgniteCheckedException { + if (isLocal()) { + return ctx.kernalContext().closure().runLocalSafe(() -> { + try { + ctx.offheap().preloadPartition(part); + } + catch (IgniteCheckedException e) { + throw new IgniteException(e); + } + }); + } + else + return executePreloadTask(part); + } + + /** {@inheritDoc} */ + @Override public boolean localPreloadPartition(int part) throws IgniteCheckedException { + if (!ctx.affinityNode()) + return false; + + GridDhtPartitionTopology top = ctx.group().topology(); + + @Nullable GridDhtLocalPartition p = top.localPartition(part, top.readyTopologyVersion(), false); + + if 
(p == null) + return false; + + try { + if (!p.reserve() || p.state() != OWNING) + return false; + + p.dataStore().preload(); + } + finally { + p.release(); + } + + return true; + } + /** * */ @@ -4856,7 +4478,8 @@ public void execute(boolean retry) { READ_COMMITTED, CU.transactionConfiguration(ctx, ctx.kernalContext().config()).getDefaultTxTimeout(), opCtx == null || !opCtx.skipStore(), - 0); + 0, + null); IgniteInternalFuture fut = asyncOp(tx, op, opCtx, retry); @@ -4880,8 +4503,10 @@ public void execute(boolean retry) { assert topVer != null && topVer.topologyVersion() > 0 : tx; + AffinityTopologyVersion awaitVer = new AffinityTopologyVersion(topVer.topologyVersion() + 1, 0); + IgniteInternalFuture topFut = - ctx.affinity().affinityReadyFuture(topVer.topologyVersion() + 1); + ctx.shared().exchange().affinityReadyFuture(awaitVer); topFut.listen(new IgniteInClosure>() { @Override public void apply(IgniteInternalFuture topFut) { @@ -5887,7 +5512,7 @@ private LocalStoreLoadClosure(@Nullable IgniteBiPredicate p, throws IgniteCheckedException { assert ver != null; - if (p != null && !p.apply(key.value(ctx.cacheObjectContext(), false), (V)val)) + if (p != null && !p.apply(key.value(ctx.cacheObjectContext(), false), (V)val)) return; long ttl = 0; @@ -6145,7 +5770,7 @@ public UpdatePutAndGetTimeStatClosure(CacheMetricsImpl metrics, long start) { /** * Delayed callable class. */ - public static abstract class TopologyVersionAwareJob extends ComputeJobAdapter { + public abstract static class TopologyVersionAwareJob extends ComputeJobAdapter { /** */ private static final long serialVersionUID = 0L; @@ -6514,6 +6139,52 @@ public ClearTask(String cacheName, AffinityTopologyVersion topVer, Set nodesByPartition(int part, AffinityTopologyVersion topV * @return Affinity assignment. 
*/ public AffinityAssignment assignment(AffinityTopologyVersion topVer) { + return assignment(topVer, cctx.shared().exchange().lastAffinityChangedTopologyVersion(topVer)); + } + + /** + * Get affinity assignment for the given topology version. + * + * @param topVer Topology version. + * @return Affinity assignment. + */ + public AffinityAssignment assignment(AffinityTopologyVersion topVer, AffinityTopologyVersion lastAffChangedTopVer) { if (cctx.isLocal()) - topVer = LOC_CACHE_TOP_VER; + topVer = lastAffChangedTopVer = LOC_CACHE_TOP_VER; GridAffinityAssignmentCache aff0 = aff; if (aff0 == null) throw new IgniteException(FAILED_TO_FIND_CACHE_ERR_MSG + cctx.name()); - return aff0.cachedAffinity(topVer); + return aff0.cachedAffinity(topVer, lastAffChangedTopVer); } /** diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java index faad1ec58b9ef..a0c996cbddb4c 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheAttributes.java @@ -25,6 +25,7 @@ import org.apache.ignite.cache.affinity.AffinityFunction; import org.apache.ignite.cache.affinity.rendezvous.RendezvousAffinityFunction; import org.apache.ignite.configuration.CacheConfiguration; +import org.apache.ignite.configuration.IgniteConfiguration; import org.apache.ignite.configuration.NearCacheConfiguration; import org.apache.ignite.configuration.TransactionConfiguration; import org.apache.ignite.internal.util.typedef.internal.S; @@ -216,7 +217,9 @@ public long defaultLockTimeout() { /** * @return Preload batch size. + * @deprecated Use {@link IgniteConfiguration#getRebalanceBatchSize()} instead. 
*/ + @Deprecated public int rebalanceBatchSize() { return ccfg.getRebalanceBatchSize(); } @@ -230,7 +233,9 @@ public long rebalanceDelay() { /** * @return Rebalance prefetch count. + * @deprecated Use {@link IgniteConfiguration#getRebalanceBatchesPrefetchCount()} instead. */ + @Deprecated public long rebalanceBatchesPrefetchCount() { return ccfg.getRebalanceBatchesPrefetchCount(); } @@ -244,14 +249,18 @@ public int rebalanceOrder() { /** * @return Rebalance throttle. + * @deprecated Use {@link IgniteConfiguration#getRebalanceThrottle()} instead. */ + @Deprecated public long rebalanceThrottle() { return ccfg.getRebalanceThrottle(); } /** * @return Rebalance timeout. + * @deprecated Use {@link IgniteConfiguration#getRebalanceTimeout()} instead. */ + @Deprecated public long rebalanceTimeout() { return ccfg.getRebalanceTimeout(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java index c8cab6674f4d8..91dfe59312dbc 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheConcurrentMapImpl.java @@ -26,7 +26,6 @@ import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.util.typedef.F; import org.apache.ignite.lang.IgnitePredicate; -import org.apache.ignite.lang.IgniteUuid; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_CACHE_ENTRY_CREATED; @@ -168,7 +167,8 @@ protected final GridCacheMapEntry putEntryIfObsoleteOrAbsent( ctx.events().addEvent(doomed.partition(), doomed.key(), ctx.localNodeId(), - (IgniteUuid)null, + null, + null, null, EVT_CACHE_ENTRY_DESTROYED, null, @@ -188,7 +188,8 @@ protected final GridCacheMapEntry putEntryIfObsoleteOrAbsent( 
ctx.events().addEvent(created.partition(), created.key(), ctx.localNodeId(), - (IgniteUuid)null, + null, + null, null, EVT_CACHE_ENTRY_CREATED, null, @@ -201,9 +202,7 @@ protected final GridCacheMapEntry putEntryIfObsoleteOrAbsent( true); if (touch) - ctx.evicts().touch( - cur, - topVer); + cur.touch(); } assert Math.abs(sizeChange) <= 1; @@ -276,7 +275,8 @@ else if (sizeChange == -1) ctx.events().addEvent(entry.partition(), entry.key(), ctx.localNodeId(), - (IgniteUuid)null, + null, + null, null, EVT_CACHE_ENTRY_DESTROYED, null, diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java index f194423320737..fa42caffda2a2 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheContext.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache; +import java.io.Closeable; import java.io.Externalizable; import java.io.IOException; import java.io.InvalidObjectException; @@ -29,6 +30,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.UUID; import java.util.concurrent.Callable; import java.util.concurrent.CountDownLatch; @@ -64,11 +66,11 @@ import org.apache.ignite.internal.processors.cache.datastructures.CacheDataStructuresManager; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTopologyFuture; import 
org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTransactionalCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.dht.colocated.GridDhtColocatedCache; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheAdapter; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearTransactionalCache; import org.apache.ignite.internal.processors.cache.dr.GridCacheDrManager; @@ -108,6 +110,7 @@ import org.apache.ignite.plugin.security.SecurityPermission; import org.jetbrains.annotations.Nullable; +import static org.apache.ignite.IgniteSystemProperties.IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT; import static org.apache.ignite.IgniteSystemProperties.IGNITE_READ_LOAD_BALANCING; import static org.apache.ignite.cache.CacheAtomicityMode.ATOMIC; import static org.apache.ignite.cache.CacheAtomicityMode.TRANSACTIONAL; @@ -116,7 +119,7 @@ import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STARTED; import static org.apache.ignite.events.EventType.EVT_CACHE_REBALANCE_STOPPED; import static org.apache.ignite.internal.IgniteNodeAttributes.ATTR_MACS; -import static org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtPartitionState.OWNING; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.OWNING; /** * Cache context. @@ -224,7 +227,7 @@ public class GridCacheContext implements Externalizable { private CacheWeakQueryIteratorsHolder> itHolder; /** Affinity node. */ - private boolean affNode; + private volatile boolean affNode; /** Conflict resolver. 
*/ private CacheVersionConflictResolver conflictRslvr; @@ -236,10 +239,10 @@ public class GridCacheContext implements Externalizable { private CountDownLatch startLatch = new CountDownLatch(1); /** Topology version when cache was started on local node. */ - private AffinityTopologyVersion locStartTopVer; + private volatile AffinityTopologyVersion locStartTopVer; /** Dynamic cache deployment ID. */ - private IgniteUuid dynamicDeploymentId; + private volatile IgniteUuid dynamicDeploymentId; /** Updates allowed flag. */ private boolean updatesAllowed; @@ -269,7 +272,14 @@ public class GridCacheContext implements Externalizable { private boolean readFromBackup = CacheConfiguration.DFLT_READ_FROM_BACKUP; /** Local node's MAC address. */ - private String locMacs; + private volatile String locMacs; + + /** Recovery mode flag. */ + private volatile boolean recoveryMode; + + /** */ + private final boolean disableTriggeringCacheInterceptorOnConflict = + Boolean.parseBoolean(System.getProperty(IGNITE_DISABLE_TRIGGERING_CACHE_INTERCEPTOR_ON_CONFLICT, "false")); /** * Empty constructor required for {@link Externalizable}. @@ -307,8 +317,11 @@ public GridCacheContext( CacheGroupContext grp, CacheType cacheType, AffinityTopologyVersion locStartTopVer, + IgniteUuid deploymentId, boolean affNode, boolean updatesAllowed, + boolean statisticsEnabled, + boolean recoveryMode, /* * Managers in starting order! @@ -393,9 +406,46 @@ public GridCacheContext( readFromBackup = cacheCfg.isReadFromBackup(); + this.dynamicDeploymentId = deploymentId; + this.recoveryMode = recoveryMode; + + statisticsEnabled(statisticsEnabled); + + assert kernalContext().recoveryMode() == recoveryMode; + + if (!recoveryMode) { + locMacs = localNode().attribute(ATTR_MACS); + + assert locMacs != null; + } + } + + /** + * Called when cache was restored during recovery and node has joined to topology. + * + * @param topVer Cache topology join version. 
+ * @param clusterWideDesc Cluster-wide cache descriptor received during exchange. + */ + public void finishRecovery(AffinityTopologyVersion topVer, DynamicCacheDescriptor clusterWideDesc) { + assert recoveryMode : this; + + recoveryMode = false; + + locStartTopVer = topVer; + locMacs = localNode().attribute(ATTR_MACS); assert locMacs != null; + + this.statisticsEnabled = clusterWideDesc.cacheConfiguration().isStatisticsEnabled(); + this.dynamicDeploymentId = clusterWideDesc.deploymentId(); + } + + /** + * @return {@code True} if cache is in recovery mode. + */ + public boolean isRecoveryMode() { + return recoveryMode; } /** @@ -419,13 +469,6 @@ public boolean customAffinityMapper() { return customAffMapper; } - /** - * @param dynamicDeploymentId Dynamic deployment ID. - */ - void dynamicDeploymentId(IgniteUuid dynamicDeploymentId) { - this.dynamicDeploymentId = dynamicDeploymentId; - } - /** * @return Dynamic deployment ID. */ @@ -635,14 +678,14 @@ public boolean isLocal() { * @return {@code True} if cache is replicated cache. */ public boolean isReplicated() { - return cacheCfg.getCacheMode() == CacheMode.REPLICATED; + return config().getCacheMode() == CacheMode.REPLICATED; } /** * @return {@code True} if cache is partitioned cache. */ public boolean isPartitioned() { - return cacheCfg.getCacheMode() == CacheMode.PARTITIONED; + return config().getCacheMode() == CacheMode.PARTITIONED; } /** @@ -656,7 +699,7 @@ public boolean isDrEnabled() { * @return {@code True} in case cache supports query. */ public boolean isQueryEnabled() { - return !F.isEmpty(cacheCfg.getQueryEntities()); + return !F.isEmpty(config().getQueryEntities()); } /** @@ -781,7 +824,7 @@ public void checkSecurity(SecurityPermission op) throws SecurityException { if (CU.isSystemCache(name())) return; - ctx.security().authorize(name(), op, null); + ctx.security().authorize(name(), op); } /** @@ -809,14 +852,23 @@ public boolean rebalanceEnabled() { * @return {@code True} if atomic. 
*/ public boolean atomic() { - return cacheCfg.getAtomicityMode() == ATOMIC; + return config().getAtomicityMode() == ATOMIC; } /** * @return {@code True} if transactional. */ public boolean transactional() { - return cacheCfg.getAtomicityMode() == TRANSACTIONAL; + CacheConfiguration cfg = config(); + + return cfg.getAtomicityMode() == TRANSACTIONAL; + } + + /** + * @return {@code True} if cache interceptor should be skipped in case of conflicts. + */ + public boolean disableTriggeringCacheInterceptorOnConflict() { + return disableTriggeringCacheInterceptorOnConflict; } /** @@ -989,9 +1041,15 @@ public GridCacheAdapter cache() { /** * @return Cache configuration for given cache instance. + * @throws IllegalStateException If this cache context was cleaned up. */ public CacheConfiguration config() { - return cacheCfg; + CacheConfiguration res = cacheCfg; + + if (res == null) + throw new IllegalStateException((new CacheStoppedException(name()))); + + return res; } /** @@ -1000,7 +1058,7 @@ public CacheConfiguration config() { * are set to {@code true} or the store is local. */ public boolean writeToStoreFromDht() { - return store().isLocal() || cacheCfg.isWriteBehindEnabled(); + return store().isLocal() || config().isWriteBehindEnabled(); } /** @@ -1452,56 +1510,56 @@ public boolean deploymentEnabled() { * @return {@code True} if store read-through mode is enabled. */ public boolean readThrough() { - return cacheCfg.isReadThrough() && !skipStore(); + return config().isReadThrough() && !skipStore(); } /** * @return {@code True} if store and read-through mode are enabled in configuration. */ public boolean readThroughConfigured() { - return store().configured() && cacheCfg.isReadThrough(); + return store().configured() && config().isReadThrough(); } /** * @return {@code True} if {@link CacheConfiguration#isLoadPreviousValue()} flag is set. 
*/ public boolean loadPreviousValue() { - return cacheCfg.isLoadPreviousValue(); + return config().isLoadPreviousValue(); } /** * @return {@code True} if store write-through is enabled. */ public boolean writeThrough() { - return cacheCfg.isWriteThrough() && !skipStore(); + return config().isWriteThrough() && !skipStore(); } /** * @return {@code True} if invalidation is enabled. */ public boolean isInvalidate() { - return cacheCfg.isInvalidate(); + return config().isInvalidate(); } /** * @return {@code True} if synchronous commit is enabled. */ public boolean syncCommit() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if synchronous rollback is enabled. */ public boolean syncRollback() { - return cacheCfg.getWriteSynchronizationMode() == FULL_SYNC; + return config().getWriteSynchronizationMode() == FULL_SYNC; } /** * @return {@code True} if only primary node should be updated synchronously. */ public boolean syncPrimary() { - return cacheCfg.getWriteSynchronizationMode() == PRIMARY_SYNC; + return config().getWriteSynchronizationMode() == PRIMARY_SYNC; } /** @@ -1717,7 +1775,7 @@ public boolean keepBinary() { * of {@link CacheConfiguration#isCopyOnRead()}. 
*/ public boolean needValueCopy() { - return affNode && cacheCfg.isCopyOnRead(); + return affNode && config().isCopyOnRead(); } /** @@ -1799,7 +1857,7 @@ public CacheObjectContext cacheObjectContext() { @Nullable public CacheObject toCacheObject(@Nullable Object obj) { assert validObjectForCache(obj) : obj; - return cacheObjects().toCacheObject(cacheObjCtx, obj, true); + return cacheObjects().toCacheObject(cacheObjCtx, obj, true, grp.isTopologyLocked()); } /** @@ -2011,6 +2069,9 @@ public void cleanup() { dataStructuresMgr = null; cacheObjCtx = null; + if (expiryPlc instanceof Closeable) + U.closeQuiet((Closeable)expiryPlc); + mgrs.clear(); } @@ -2072,7 +2133,7 @@ public boolean reserveForFastLocalGet(int part, AffinityTopologyVersion topVer) topology().partitionState(localNodeId(), part) == OWNING : "result=" + result + ", persistenceEnabled=" + group().persistenceEnabled() + ", partitionState=" + topology().partitionState(localNodeId(), part) + - ", replicated=" + isReplicated(); + ", replicated=" + isReplicated() + ", part=" + part; return result; } @@ -2185,24 +2246,37 @@ else if (type == EVT_CACHE_REBALANCE_STOPPED) { * * @param affNodes All affinity nodes. * @param canRemap Flag indicating that 'get' should be done on a locked topology version. + * @param partitionId Partition ID. * @return Affinity node to get key from or {@code null} if there is no suitable alive node. */ - @Nullable public ClusterNode selectAffinityNodeBalanced(List affNodes, boolean canRemap) { + @Nullable public ClusterNode selectAffinityNodeBalanced( + List affNodes, + Set invalidNodes, + int partitionId, + boolean canRemap + ) { if (!readLoadBalancingEnabled) { if (!canRemap) { + // Find next available node if we can not wait next topology version. 
for (ClusterNode node : affNodes) { - if (ctx.discovery().alive(node)) + if (ctx.discovery().alive(node) && !invalidNodes.contains(node)) return node; } return null; } - else - return affNodes.get(0); + else { + ClusterNode first = affNodes.get(0); + + return !invalidNodes.contains(first) ? first : null; + } } - if (!readFromBackup) - return affNodes.get(0); + if (!readFromBackup){ + ClusterNode first = affNodes.get(0); + + return !invalidNodes.contains(first) ? first : null; + } assert locMacs != null; @@ -2211,7 +2285,7 @@ else if (type == EVT_CACHE_REBALANCE_STOPPED) { ClusterNode n0 = null; for (ClusterNode node : affNodes) { - if (canRemap || discovery().alive(node)) { + if ((canRemap || discovery().alive(node)) && !invalidNodes.contains(node)) { if (locMacs.equals(node.attribute(ATTR_MACS))) return node; diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java index b70439a7d08ee..3df507a4434ba 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheDeploymentManager.java @@ -92,6 +92,9 @@ public class GridCacheDeploymentManager extends GridCacheSharedManagerAdap /** */ private boolean depEnabled; + /** Class loader id for local thread. */ + private ThreadLocal localLdrId = new ThreadLocal<>(); + /** {@inheritDoc} */ @Override public void start0() throws IgniteCheckedException { globalLdr = new CacheClassLoader(cctx.gridConfig().getClassLoader()); @@ -365,6 +368,8 @@ public void p2pContext( DeploymentMode mode, Map participants ) { + localLdrId.set(ldrId); + assert depEnabled; if (mode == PRIVATE || mode == ISOLATED) { @@ -760,7 +765,7 @@ public boolean isGlobalLoader() { /** * Cache class loader. 
*/ - private class CacheClassLoader extends ClassLoader { + private class CacheClassLoader extends ClassLoader implements CacheClassLoaderMarker { /** */ private final String[] p2pExclude; @@ -804,31 +809,25 @@ private CacheClassLoader(ClassLoader classLdr) { } } - for (CachedDeploymentInfo t : deps.values()) { - UUID sndId = t.senderId(); - IgniteUuid ldrId = t.loaderId(); - String userVer = t.userVersion(); - DeploymentMode mode = t.mode(); - Map participants = t.participants(); - - GridDeployment d = cctx.gridDeploy().getGlobalDeployment( - mode, - name, - name, - userVer, - sndId, - ldrId, - participants, - F.alwaysTrue()); + IgniteUuid curLdrId = localLdrId.get(); - if (d != null) { - Class cls = d.deployedClass(name); + if (curLdrId != null) { + CachedDeploymentInfo t = deps.get(curLdrId); + + if (t != null) { + Class cls = tryToloadClassFromCacheDep(name, t); if (cls != null) return cls; } } + for (CachedDeploymentInfo t : deps.values()) { + Class cls = tryToloadClassFromCacheDep(name, t); + if (cls != null) + return cls; + } + Class cls = getParent().loadClass(name); if (cls != null) @@ -837,6 +836,33 @@ private CacheClassLoader(ClassLoader classLdr) { throw new ClassNotFoundException("Failed to load class [name=" + name+ ", ctx=" + deps + ']'); } + /** + * @param name Name of resource. + * @param deploymentInfo Grid cached deployment info. + * @return Class if can to load resource with the name or {@code null} otherwise. + */ + @Nullable private Class tryToloadClassFromCacheDep(String name, CachedDeploymentInfo deploymentInfo) { + UUID sndId = deploymentInfo.senderId(); + IgniteUuid ldrId = deploymentInfo.loaderId(); + String userVer = deploymentInfo.userVersion(); + DeploymentMode mode = deploymentInfo.mode(); + Map participants = deploymentInfo.participants(); + + GridDeployment d = cctx.gridDeploy().getGlobalDeployment( + mode, + name, + name, + userVer, + sndId, + ldrId, + participants, + F.alwaysTrue()); + + Class cls = d != null ? 
d.deployedClass(name) : null; + + return cls; + } + /** * @param name Name of the class. * @return {@code True} if locally excluded. diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java index 6cddb2da10a06..4d9bf4795d5cf 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEntryEx.java @@ -26,7 +26,7 @@ import org.apache.ignite.cache.eviction.EvictableEntry; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.GridDistributedLockCancelledException; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -98,11 +98,6 @@ public interface GridCacheEntryEx { */ public int partition(); - /** - * @return Start version. - */ - public long startVersion(); - /** * @return Key. */ @@ -870,12 +865,11 @@ public Collection localCandidates(@Nullable GridCacheVer /** * Update index from within entry lock, passing key, value, and expiration time to provided closure. * - * @param filter Row filter. * @param clo Closure to apply to key, value, and expiration time. * @throws IgniteCheckedException If failed. * @throws GridCacheEntryRemovedException If entry was removed. 
*/ - public void updateIndex(SchemaIndexCacheFilter filter, SchemaIndexCacheVisitorClosure clo) + public void updateIndex(SchemaIndexCacheVisitorClosure clo) throws IgniteCheckedException, GridCacheEntryRemovedException; /** @@ -1044,4 +1038,10 @@ public void updateIndex(SchemaIndexCacheFilter filter, SchemaIndexCacheVisitorCl * @return {@code True} if the entry is locked. */ public boolean lockedByCurrentThread(); + + /** + * Touch this entry in its context's eviction manager. + * + */ + public void touch(); } diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java index 3c5cf1e944ad1..c095ebe27fdc7 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEventManager.java @@ -20,6 +20,7 @@ import java.util.Collection; import java.util.UUID; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.configuration.CacheConfiguration; import org.apache.ignite.events.CacheEvent; import org.apache.ignite.internal.managers.eventstorage.GridLocalEventListener; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; @@ -63,6 +64,7 @@ public void removeListener(GridLocalEventListener lsnr) { /** * @param key Key for event. * @param tx Possible surrounding transaction. + * @param txLbl Possible lable of possible surrounding transaction. * @param val Read value. * @param subjId Subject ID. * @param taskName Task name. 
@@ -70,6 +72,7 @@ public void removeListener(GridLocalEventListener lsnr) { */ public void readEvent(KeyCacheObject key, @Nullable IgniteInternalTx tx, + @Nullable String txLbl, @Nullable CacheObject val, @Nullable UUID subjId, @Nullable String taskName, @@ -77,7 +80,9 @@ public void readEvent(KeyCacheObject key, if (isRecordable(EVT_CACHE_OBJECT_READ)) { addEvent(cctx.affinity().partition(key), key, + cctx.localNodeId(), tx, + txLbl, null, EVT_CACHE_OBJECT_READ, val, @@ -107,7 +112,7 @@ public void readEvent(KeyCacheObject key, */ public void addEvent(int part, KeyCacheObject key, - IgniteInternalTx tx, + @Nullable IgniteInternalTx tx, @Nullable GridCacheMvccCandidate owner, int type, @Nullable CacheObject newVal, @@ -143,7 +148,8 @@ public void addEvent(int type) { 0, null, cctx.localNodeId(), - (IgniteUuid)null, + null, + null, null, type, null, @@ -174,7 +180,7 @@ public void addEvent(int type) { public void addEvent(int part, KeyCacheObject key, UUID nodeId, - IgniteInternalTx tx, + @Nullable IgniteInternalTx tx, GridCacheMvccCandidate owner, int type, CacheObject newVal, @@ -188,7 +194,9 @@ public void addEvent(int part, { addEvent(part, key, - nodeId, tx == null ? null : tx.xid(), + nodeId, + tx, + null, owner == null ? null : owner.version(), type, newVal, @@ -234,7 +242,8 @@ public void addEvent(int part, addEvent(part, key, evtNodeId, - tx == null ? null : tx.xid(), + tx, + null, owner == null ? null : owner.version(), type, newVal, @@ -251,7 +260,8 @@ public void addEvent(int part, * @param part Partition. * @param key Key for the event. * @param evtNodeId Event node ID. - * @param xid Transaction ID. + * @param tx Possible surrounding transaction. + * @param txLbl Possible label of possible surrounding transaction. * @param lockId Lock ID. * @param type Event type. * @param newVal New value. 
@@ -266,7 +276,8 @@ public void addEvent( int part, KeyCacheObject key, UUID evtNodeId, - @Nullable IgniteUuid xid, + @Nullable IgniteInternalTx tx, + @Nullable String txLbl, @Nullable Object lockId, int type, @Nullable CacheObject newVal, @@ -324,6 +335,10 @@ public void addEvent( oldVal0 = cctx.cacheObjectContext().unwrapBinaryIfNeeded(oldVal, true, false); } + IgniteUuid xid = tx == null ? null : tx.xid(); + + String finalTxLbl = (tx == null || tx.label() == null) ? txLbl : tx.label(); + cctx.gridEvents().record(new CacheEvent(cctx.name(), cctx.localNode(), evtNode, @@ -333,6 +348,7 @@ public void addEvent( cctx.isNear(), key0, xid, + finalTxLbl, lockId, val0, hasNewVal, @@ -372,8 +388,19 @@ public void addEvent( public boolean isRecordable(int type) { GridCacheContext cctx0 = cctx; - return cctx0 != null && cctx0.userCache() && cctx0.gridEvents().isRecordable(type) - && !cctx0.config().isEventsDisabled(); + // Event recording is impossible in recovery mode. + if (cctx0 == null || cctx0.kernalContext().recoveryMode()) + return false; + + try { + CacheConfiguration cfg = cctx0.config(); + + return cctx0.userCache() && cctx0.gridEvents().isRecordable(type) && !cfg.isEventsDisabled(); + } + catch (IllegalStateException e) { + // Cache context was cleaned up. 
+ return false; + } } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java index 2c9dec7f5d25b..2853f00d0dd86 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheEvictionManager.java @@ -22,7 +22,6 @@ import org.apache.ignite.cache.eviction.EvictionFilter; import org.apache.ignite.cache.eviction.EvictionPolicy; import org.apache.ignite.configuration.CacheConfiguration; -import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; import org.apache.ignite.internal.processors.cache.version.GridCacheVersion; import org.apache.ignite.internal.processors.cache.version.GridCacheVersionManager; @@ -150,7 +149,7 @@ private boolean evict0( cache.metrics0().onEvict(); if (recordable) - cctx.events().addEvent(entry.partition(), entry.key(), cctx.nodeId(), (IgniteUuid)null, null, + cctx.events().addEvent(entry.partition(), entry.key(), cctx.nodeId(), null, null, null, EVT_CACHE_ENTRY_EVICTED, null, false, oldVal, hasVal, null, null, null, false); if (log.isDebugEnabled()) @@ -166,6 +165,11 @@ private boolean evict0( /** {@inheritDoc} */ @Override public void touch(IgniteTxEntry txEntry, boolean loc) { + assert txEntry.context() == cctx : "Entry from another cache context passed to eviction manager: [" + + "entry=" + txEntry + + ", cctx=" + cctx + + ", entryCtx=" + txEntry.context() + "]"; + if (!plcEnabled) return; @@ -191,7 +195,12 @@ private boolean evict0( } /** {@inheritDoc} */ - @Override public void touch(GridCacheEntryEx e, AffinityTopologyVersion topVer) { + @Override public void touch(GridCacheEntryEx e) { + assert e.context() == cctx : "Entry 
from another cache context passed to eviction manager: [" + + "entry=" + e + + ", cctx=" + cctx + + ", entryCtx=" + e.context() + "]"; + if (e.detached() || e.isInternal()) return; @@ -233,13 +242,17 @@ private void warnFirstEvict() { } U.warn(log, "Evictions started (cache may have reached its capacity)." + - " You may wish to increase 'maxSize' on eviction policy being used for cache: " + cctx.name(), - "Evictions started (cache may have reached its capacity): " + cctx.name()); + " You may wish to increase 'maxSize' on eviction policy being used for cache: " + cctx.name()); } /** {@inheritDoc} */ @Override public boolean evict(@Nullable GridCacheEntryEx entry, @Nullable GridCacheVersion obsoleteVer, boolean explicit, @Nullable CacheEntryPredicate[] filter) throws IgniteCheckedException { + assert entry == null || entry.context() == cctx : "Entry from another cache context passed to eviction manager: [" + + "entry=" + entry + + ", cctx=" + cctx + + ", entryCtx=" + entry.context() + "]"; + if (entry == null) return true; @@ -277,7 +290,7 @@ private void warnFirstEvict() { notifyPolicy(entry); if (recordable) - cctx.events().addEvent(entry.partition(), entry.key(), cctx.nodeId(), (IgniteUuid)null, null, + cctx.events().addEvent(entry.partition(), entry.key(), cctx.nodeId(), null, null, null, EVT_CACHE_ENTRY_EVICTED, null, false, entry.rawGet(), entry.hasValue(), null, null, null, false); } @@ -292,6 +305,10 @@ private void notifyPolicy(GridCacheEntryEx e) { assert plcEnabled; assert plc != null; assert !e.isInternal() : "Invalid entry for policy notification: " + e; + assert e.context() == cctx : "Entry from another cache context passed to eviction manager: [" + + "entry=" + e + + ", cctx=" + cctx + + ", entryCtx=" + e.context() + "]"; if (log.isDebugEnabled()) log.debug("Notifying eviction policy with entry: " + e); diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java 
b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java index b9a4b257b529f..7e1d867c4ec21 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGateway.java @@ -133,6 +133,8 @@ public void leaveNoLock() { ctx.tm().resetContext(); ctx.mvcc().contextReset(); + ctx.tm().leaveNearTxSystemSection(); + // Unwind eviction notifications. if (!ctx.shared().closed(ctx)) CU.unwindEvicts(ctx); @@ -172,6 +174,8 @@ public void leave() { onEnter(); + ctx.tm().enterNearTxSystemSection(); + Lock lock = rwLock.readLock(); lock.lock(); @@ -239,6 +243,8 @@ public void leaveNoLock(CacheOperationContext prev) { // Unwind eviction notifications. CU.unwindEvicts(ctx); + ctx.tm().leaveNearTxSystemSection(); + // Return back previous thread local operation context per call. ctx.operationContextPerCall(prev); } @@ -253,6 +259,13 @@ private void onEnter() { ctx.deploy().onEnter(); } + /** + * + */ + public boolean isStopped() { + return !checkState(false, false); + } + /** * */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGroupIdMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGroupIdMessage.java index 09c143b0c0dd3..bfdce35e86e62 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGroupIdMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheGroupIdMessage.java @@ -50,7 +50,7 @@ public int groupId() { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 3; + return 4; } /** {@inheritDoc} */ @@ -68,7 +68,7 @@ public int groupId() { } switch (writer.state()) { - case 2: + case 3: if (!writer.writeInt("grpId", grpId)) return false; @@ -90,7 +90,7 @@ public int groupId() { return false; switch (reader.state()) { - case 2: + case 
3: grpId = reader.readInt("grpId"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java index 6c20bdd15bdd6..e0944397ecf3d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIdMessage.java @@ -52,7 +52,7 @@ public void cacheId(int cacheId) { /** {@inheritDoc} */ @Override public byte fieldsCount() { - return 3; + return 4; } /** {@inheritDoc} */ @@ -70,7 +70,7 @@ public void cacheId(int cacheId) { } switch (writer.state()) { - case 2: + case 3: if (!writer.writeInt("cacheId", cacheId)) return false; @@ -92,7 +92,7 @@ public void cacheId(int cacheId) { return false; switch (reader.state()) { - case 2: + case 3: cacheId = reader.readInt("cacheId"); if (!reader.isLastRead()) diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java index 3182192678cbc..cf9a90f5b5f5d 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheIoManager.java @@ -34,6 +34,8 @@ import org.apache.ignite.IgniteLogger; import org.apache.ignite.binary.BinaryObjectException; import org.apache.ignite.cluster.ClusterNode; +import org.apache.ignite.failure.FailureContext; +import org.apache.ignite.failure.FailureType; import org.apache.ignite.internal.IgniteClientDisconnectedCheckedException; import org.apache.ignite.internal.IgniteInternalFuture; import org.apache.ignite.internal.cluster.ClusterTopologyCheckedException; @@ -43,6 +45,7 @@ import 
org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.distributed.dht.CacheGetFuture; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentRequest; +import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtAffinityAssignmentResponse; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockRequest; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLockResponse; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtTxFinishRequest; @@ -163,6 +166,11 @@ public void dumpPendingMessages(StringBuilder sb) { final GridCacheMessage cacheMsg = (GridCacheMessage)msg; + AffinityTopologyVersion rmtAffVer = cacheMsg.topologyVersion(); + AffinityTopologyVersion lastAffChangedVer = cacheMsg.lastAffinityChangedTopologyVersion(); + + cctx.exchange().lastAffinityChangedTopologyVersion(rmtAffVer, lastAffChangedVer); + IgniteInternalFuture fut = null; if (cacheMsg.partitionExchangeMessage()) { @@ -220,9 +228,8 @@ else if (desc.receivedFromStartVersion() != null) } else { AffinityTopologyVersion locAffVer = cctx.exchange().readyAffinityVersion(); - AffinityTopologyVersion rmtAffVer = cacheMsg.topologyVersion(); - if (locAffVer.compareTo(rmtAffVer) < 0) { + if (locAffVer.before(lastAffChangedVer)) { IgniteLogger log = cacheMsg.messageLogger(cctx); if (log.isDebugEnabled()) { @@ -232,12 +239,13 @@ else if (desc.receivedFromStartVersion() != null) msg0.append(", locTopVer=").append(locAffVer). append(", rmtTopVer=").append(rmtAffVer). + append(", lastAffChangedVer=").append(lastAffChangedVer). 
append(']'); log.debug(msg0.toString()); } - fut = cctx.exchange().affinityReadyFuture(rmtAffVer); + fut = cctx.exchange().affinityReadyFuture(lastAffChangedVer); } } @@ -579,7 +587,22 @@ private void onMessage0(final UUID nodeId, final GridCacheMessage cacheMsg, processMessage(nodeId, cacheMsg, c); } catch (Throwable e) { - U.error(log, "Failed to process message [senderId=" + nodeId + ", messageType=" + cacheMsg.getClass() + ']', e); + String msgStr; + + try { + msgStr = String.valueOf(cacheMsg); + } + catch (Throwable e0) { + String clsName = cacheMsg.getClass().getName(); + + U.error(log, "Failed to log message due to an error: " + clsName, e0); + + msgStr = clsName + "(failed to log message)"; + } + + U.error(log, "Failed to process message [senderId=" + nodeId + ", msg=" + msgStr + ']', e); + + cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); if (e instanceof Error) throw (Error)e; @@ -609,7 +632,6 @@ private void sendResponseOnFailedMessage(UUID nodeId, GridCacheMessage res, Grid } } - /** * @param cacheMsg Cache message. * @param nodeId Node ID. @@ -863,6 +885,7 @@ private void processFailedMessage(UUID nodeId, 0, req.classError(), null, + false, false); sendResponseOnFailedMessage(nodeId, res, cctx, plc); @@ -912,7 +935,8 @@ private void processFailedMessage(UUID nodeId, break; - case 114: { + case 114: + case 120: { processMessage(nodeId, msg, c);// Will be handled by Rebalance Demander. 
} @@ -1057,10 +1081,18 @@ private void processMessage(UUID nodeId, GridCacheMessage msg, IgniteBiInClosure log.debug("Finished processing cache communication message [nodeId=" + nodeId + ", msg=" + msg + ']'); } catch (Throwable e) { - U.error(log, "Failed processing message [senderId=" + nodeId + ", msg=" + msg + ']', e); + try { + U.error(log, "Failed processing message [senderId=" + nodeId + ", msg=" + msg + ']', e); + } + catch (Throwable e0) { + U.error(log, "Failed processing message [senderId=" + nodeId + ", msg=(failed to log message)", e); - if (e instanceof Error) - throw e; + U.error(log, "Failed to log message due to an error: ", e0); + } + + cctx.kernalContext().failure().process(new FailureContext(FailureType.CRITICAL_ERROR, e)); + + throw e; } finally { onMessageProcessed(msg); @@ -1145,6 +1177,8 @@ public boolean checkNodeLeft(UUID nodeId, IgniteCheckedException sndErr, boolean public void send(ClusterNode node, GridCacheMessage msg, byte plc) throws IgniteCheckedException { assert !node.isLocal() : node; + msg.lastAffinityChangedTopologyVersion(cctx.exchange().lastAffinityChangedTopologyVersion(msg.topologyVersion())); + if (!onSend(msg, node.id())) return; @@ -1212,6 +1246,8 @@ public void sendOrderedMessage(ClusterNode node, Object topic, GridCacheMessage if (!onSend(msg, node.id())) return; + msg.lastAffinityChangedTopologyVersion(cctx.exchange().lastAffinityChangedTopologyVersion(msg.topologyVersion())); + int cnt = 0; while (cnt <= retryCnt) { @@ -1268,6 +1304,8 @@ void sendNoRetry(ClusterNode node, if (!onSend(msg, null)) return; + msg.lastAffinityChangedTopologyVersion(cctx.exchange().lastAffinityChangedTopologyVersion(msg.topologyVersion())); + try { cctx.gridIO().sendToGridTopic(node, TOPIC_CACHE, msg, plc); @@ -1330,22 +1368,22 @@ private void addHandler( if (msgIdx != -1) { Map idxClsHandlers0 = msgHandlers.idxClsHandlers; - IgniteBiInClosure[] cacheClsHandlers = idxClsHandlers0.get(hndId); + IgniteBiInClosure[] cacheClsHandlers = 
idxClsHandlers0.compute(hndId, (key, clsHandlers) -> { + if (clsHandlers == null) + clsHandlers = new IgniteBiInClosure[GridCacheMessage.MAX_CACHE_MSG_LOOKUP_INDEX]; - if (cacheClsHandlers == null) { - cacheClsHandlers = new IgniteBiInClosure[GridCacheMessage.MAX_CACHE_MSG_LOOKUP_INDEX]; + if(clsHandlers[msgIdx] != null) + return null; - idxClsHandlers0.put(hndId, cacheClsHandlers); - } + clsHandlers[msgIdx] = c; + + return clsHandlers; + }); - if (cacheClsHandlers[msgIdx] != null) + if (cacheClsHandlers == null) throw new IgniteException("Duplicate cache message ID found [hndId=" + hndId + ", type=" + type + ']'); - cacheClsHandlers[msgIdx] = c; - - msgHandlers.idxClsHandlers = idxClsHandlers0; - return; } else { @@ -1391,6 +1429,9 @@ private void removeHandlers(MessageHandlers msgHandlers, int hndId) { for (Iterator iter = msgHandlers.clsHandlers.keySet().iterator(); iter.hasNext(); ) { ListenerKey key = iter.next(); + if (key.msgCls.equals(GridDhtAffinityAssignmentResponse.class)) + continue; + if (key.hndId == hndId) iter.remove(); } @@ -1562,7 +1603,7 @@ else if (msg instanceof GridCacheGroupIdMessage) */ static class MessageHandlers { /** Indexed class handlers. */ - volatile Map idxClsHandlers = new HashMap<>(); + volatile Map idxClsHandlers = new ConcurrentHashMap<>(); /** Handler registry. 
*/ ConcurrentMap> diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheLoaderWriterStore.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheLoaderWriterStore.java index 03beaf0973314..cf07b18183734 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheLoaderWriterStore.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheLoaderWriterStore.java @@ -17,6 +17,7 @@ package org.apache.ignite.internal.processors.cache; +import java.io.Closeable; import java.io.Serializable; import java.util.Collection; import java.util.Collections; @@ -26,6 +27,7 @@ import javax.cache.integration.CacheWriter; import org.apache.ignite.cache.store.CacheStore; import org.apache.ignite.internal.util.typedef.internal.S; +import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiInClosure; import org.apache.ignite.lifecycle.LifecycleAware; import org.jetbrains.annotations.Nullable; @@ -81,9 +83,13 @@ CacheWriter writer() { @Override public void stop() { if (ldr instanceof LifecycleAware) ((LifecycleAware)ldr).stop(); + else if (ldr instanceof Closeable) + U.closeQuiet((Closeable)ldr); if (writer instanceof LifecycleAware) ((LifecycleAware)writer).stop(); + else if (writer instanceof Closeable) + U.closeQuiet((Closeable)writer); } /** {@inheritDoc} */ diff --git a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java index 74dabe9a1f7dd..d30d0b866d779 100644 --- a/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java +++ b/modules/core/src/main/java/org/apache/ignite/internal/processors/cache/GridCacheMapEntry.java @@ -22,8 +22,10 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; 
import java.util.UUID; import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; import javax.cache.Cache; import javax.cache.expiry.ExpiryPolicy; @@ -33,16 +35,19 @@ import org.apache.ignite.IgniteCheckedException; import org.apache.ignite.IgniteException; import org.apache.ignite.IgniteLogger; +import org.apache.ignite.cache.CacheInterceptor; import org.apache.ignite.cache.eviction.EvictableEntry; -import org.apache.ignite.internal.pagemem.wal.StorageException; +import org.apache.ignite.internal.NodeStoppingException; +import org.apache.ignite.internal.UnregisteredBinaryTypeException; +import org.apache.ignite.internal.UnregisteredClassException; import org.apache.ignite.internal.pagemem.wal.WALPointer; import org.apache.ignite.internal.pagemem.wal.record.DataEntry; import org.apache.ignite.internal.pagemem.wal.record.DataRecord; import org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion; import org.apache.ignite.internal.processors.cache.GridCacheUpdateAtomicResult.UpdateOutcome; import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry; -import org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.dht.atomic.GridDhtAtomicAbstractUpdateFuture; +import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition; import org.apache.ignite.internal.processors.cache.distributed.near.GridNearCacheEntry; import org.apache.ignite.internal.processors.cache.extras.GridCacheEntryExtras; import org.apache.ignite.internal.processors.cache.extras.GridCacheMvccEntryExtras; @@ -51,6 +56,7 @@ import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow; import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter; import org.apache.ignite.internal.processors.cache.persistence.DataRegion; 
+import org.apache.ignite.internal.processors.cache.persistence.StorageException; import org.apache.ignite.internal.processors.cache.query.continuous.CacheContinuousQueryListener; import org.apache.ignite.internal.processors.cache.transactions.IgniteInternalTx; import org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry; @@ -77,6 +83,8 @@ import org.apache.ignite.internal.util.typedef.internal.S; import org.apache.ignite.internal.util.typedef.internal.U; import org.apache.ignite.lang.IgniteBiTuple; +import org.apache.ignite.lang.IgnitePredicate; +import org.apache.ignite.thread.IgniteThread; import org.jetbrains.annotations.Nullable; import static org.apache.ignite.events.EventType.EVT_CACHE_OBJECT_EXPIRED; @@ -88,6 +96,7 @@ import static org.apache.ignite.internal.processors.cache.GridCacheOperation.DELETE; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.TRANSFORM; import static org.apache.ignite.internal.processors.cache.GridCacheOperation.UPDATE; +import static org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState.RENTING; import static org.apache.ignite.internal.processors.dr.GridDrType.DR_NONE; /** @@ -109,7 +118,9 @@ public abstract class GridCacheMapEntry extends GridMetadataAwareAdapter impleme /** * NOTE + *
* ==== + *
* Make sure to recalculate this value any time when adding or removing fields from entry. * The size should be count as follows: *
    @@ -117,9 +128,44 @@ public abstract class GridCacheMapEntry extends GridMetadataAwareAdapter impleme *
  • References: 8 each
  • *
  • Each nested object should be analyzed in the same way as above.
  • *
+ * ==== + *
+ *
    + *
  • Reference fields:
      + *
    • 8 : {@link #cctx}
    • + *
    • 8 : {@link #key}
    • + *
    • 8 : {@link #val}
    • + *
    • 8 : {@link #ver}
    • + *
    • 8 : {@link #extras}
    • + *
    • 8 : {@link #lock}
    • + *
    • 8 : {@link GridMetadataAwareAdapter#data}
    • + *
  • + *
  • Primitive fields:
      + *
    • 4 : {@link #hash}
    • + *
    • 1 : {@link #flags}
    • + *
  • + *
  • Extras:
      + *
    • 8 : {@link GridCacheEntryExtras#ttl()}
    • + *
    • 8 : {@link GridCacheEntryExtras#expireTime()}
    • + *
  • + *
  • Version:
      + *
    • 4 : {@link GridCacheVersion#topVer}
    • + *
    • 4 : {@link GridCacheVersion#nodeOrderDrId}
    • + *
    • 8 : {@link GridCacheVersion#order}
    • + *
  • + *
  • Key:
      + *
    • 8 : {@link CacheObjectAdapter#val}
    • + *
    • 8 : {@link CacheObjectAdapter#valBytes}
    • + *
    • 4 : {@link KeyCacheObjectImpl#part}
    • + *
  • + *
  • Value:
      + *
    • 8 : {@link CacheObjectAdapter#val}
    • + *
    • 8 : {@link CacheObjectAdapter#valBytes}
    • + *
  • + *
*/ - // 7 * 8 /*references*/ + 2 * 8 /*long*/ + 1 * 4 /*int*/ + 1 * 1 /*byte*/ + array at parent = 85 - private static final int SIZE_OVERHEAD = 85 /*entry*/ + 32 /* version */ + 4 * 7 /* key + val */; + private static final int SIZE_OVERHEAD = 8 * 8 /* references */ + 5 /* primitives */ + 16 /* extras */ + + 16 /* version */ + 20 /* key */ + 16 /* value */; /** Static logger to avoid re-creation. Made static for test purpose. */ protected static final AtomicReference logRef = new AtomicReference<>(); @@ -139,10 +185,6 @@ public abstract class GridCacheMapEntry extends GridMetadataAwareAdapter impleme @GridToStringInclude protected CacheObject val; - /** Start version. */ - @GridToStringInclude - protected final long startVer; - /** Version. */ @GridToStringInclude protected GridCacheVersion ver; @@ -159,6 +201,10 @@ public abstract class GridCacheMapEntry extends GridMetadataAwareAdapter impleme @GridToStringExclude private final ReentrantLock lock = new ReentrantLock(); + /** Read Lock for continuous query listener */ + @GridToStringExclude + private final Lock listenerLock; + /** * Flags: *