From a9e7ab9f21178023ca7d05e4d717e08966fc645d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 15 Mar 2024 10:34:51 +0100 Subject: [PATCH 01/17] HDDS-10496. Fetch dependencies for cache using actual build (#6359) (cherry picked from commit b82a2a28426e26b72500fc77ed203ff4eea57a45) --- .github/workflows/populate-cache.yml | 2 +- hadoop-ozone/ozonefs-hadoop2/pom.xml | 1 + hadoop-ozone/ozonefs-hadoop3-client/pom.xml | 2 ++ hadoop-ozone/ozonefs-hadoop3/pom.xml | 1 + hadoop-ozone/ozonefs-shaded/pom.xml | 1 + hadoop-ozone/pom.xml | 7 +++++++ pom.xml | 11 +++++++++++ 7 files changed, 24 insertions(+), 1 deletion(-) diff --git a/.github/workflows/populate-cache.yml b/.github/workflows/populate-cache.yml index 47d4d30f6f82..d4c9cd8120ab 100644 --- a/.github/workflows/populate-cache.yml +++ b/.github/workflows/populate-cache.yml @@ -54,7 +54,7 @@ jobs: - name: Fetch dependencies if: steps.restore-cache.outputs.cache-hit != 'true' - run: mvn --batch-mode --fail-never --no-transfer-progress --show-version dependency:go-offline + run: mvn --batch-mode --fail-never --no-transfer-progress --show-version -Pgo-offline -Pdist clean verify - name: Delete Ozone jars from repo if: steps.restore-cache.outputs.cache-hit != 'true' diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index c24aea74c417..3fe4f014eb1a 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -128,6 +128,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index 3cb0511a58c2..df041cf844bf 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -54,6 +54,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* @@ -77,6 +78,7 @@ shade + ${maven.shade.skip} diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index b9dec3fd1eef..0f15eaf8363b 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -74,6 +74,7 @@ unpack + ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 50464907f1b3..05caaf934de6 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -91,6 +91,7 @@ shade + ${maven.shade.skip} diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 370401b6dc69..686d3bbf9a8a 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -408,6 +408,13 @@ ozonefs-hadoop3-client + + go-offline + + ozonefs-shaded + ozonefs-hadoop2 + + build-with-recon diff --git a/pom.xml b/pom.xml index a78927e42f27..f913338f2c8f 100644 --- a/pom.xml +++ b/pom.xml @@ -2238,6 +2238,17 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + + go-offline + + void + true + true + true + true + true + + client From 2f5db3aea1f4ee0430ab90280be2b58bb2ed0b34 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Wed, 20 Mar 2024 07:48:22 +0800 Subject: [PATCH 02/17] HDDS-9802. 
Tool to fix corrupted snapshot chain (#6386) (cherry picked from commit e9073166da9cb16e0529df9f59818842cc7fbbcb) --- .../org/apache/hadoop/hdds/utils/IOUtils.java | 4 +- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 4 +- hadoop-ozone/dist/src/shell/ozone/ozone | 6 + .../hadoop/ozone/repair/OzoneRepair.java | 64 ++++++ .../apache/hadoop/ozone/repair/RDBRepair.java | 58 +++++ .../ozone/repair/om/SnapshotRepair.java | 200 ++++++++++++++++++ .../hadoop/ozone/repair/om/package-info.java | 22 ++ .../hadoop/ozone/repair/package-info.java | 22 ++ 8 files changed, 376 insertions(+), 4 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java index 109f4b3df054..c3e8de68981a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/IOUtils.java @@ -68,7 +68,7 @@ public static void close(Logger logger, AutoCloseable... closeables) { * Close each argument, catching exceptions and logging them as error. */ public static void close(Logger logger, - Collection closeables) { + Collection closeables) { if (closeables == null) { return; } @@ -95,7 +95,7 @@ public static void closeQuietly(AutoCloseable... closeables) { /** * Close each argument, swallowing exceptions. */ - public static void closeQuietly(Collection closeables) { + public static void closeQuietly(Collection closeables) { close(null, closeables); } } diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index f7a39ea10001..55e3e668df11 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -49,8 +49,8 @@ * This class is used for storing info related to Snapshots. * * Each snapshot created has an associated SnapshotInfo entry - * containing the snapshotid, snapshot path, - * snapshot checkpoint directory, previous snapshotid + * containing the snapshotId, snapshot path, + * snapshot checkpoint directory, previous snapshotId * for the snapshot path & global amongst other necessary fields. 
*/ public final class SnapshotInfo implements Auditable, CopyObject { diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 752d31153506..ae0884458477 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -59,6 +59,7 @@ function ozone_usage ozone_add_subcommand "dtutil" client "operations related to delegation tokens" ozone_add_subcommand "admin" client "Ozone admin tool" ozone_add_subcommand "debug" client "Ozone debug tool" + ozone_add_subcommand "repair" client "Ozone repair tool" ozone_add_subcommand "checknative" client "checks if native libraries are loaded" ozone_generate_usage "${OZONE_SHELL_EXECNAME}" false @@ -236,6 +237,11 @@ function ozonecmd_case OZONE_DEBUG_OPTS="${OZONE_DEBUG_OPTS} ${OZONE_MODULE_ACCESS_ARGS}" OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; + repair) + OZONE_CLASSNAME=org.apache.hadoop.ozone.repair.OzoneRepair + OZONE_DEBUG_OPTS="${OZONE_DEBUG_OPTS} ${OZONE_MODULE_ACCESS_ARGS}" + OZONE_RUN_ARTIFACT_NAME="ozone-tools" + ;; checknative) OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.checknative.CheckNative OZONE_RUN_ARTIFACT_NAME="ozone-tools" diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java new file mode 100644 index 000000000000..3bbbded58028 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +/** + * Ozone Repair Command line tool. + */ +@CommandLine.Command(name = "ozone repair", + description = "Operational tool to repair Ozone", + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneRepair extends GenericCli { + + private OzoneConfiguration ozoneConf; + + public OzoneRepair() { + super(OzoneRepair.class); + } + + @VisibleForTesting + public OzoneRepair(OzoneConfiguration configuration) { + super(OzoneRepair.class); + this.ozoneConf = configuration; + } + + public OzoneConfiguration getOzoneConf() { + if (ozoneConf == null) { + ozoneConf = createOzoneConfiguration(); + } + return ozoneConf; + } + + /** + * Main for the Ozone Repair shell Command handling. 
+ * + * @param argv - System Args Strings[] + * @throws Exception + */ + public static void main(String[] argv) throws Exception { + new OzoneRepair().run(argv); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java new file mode 100644 index 000000000000..0f36934ec14d --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RDBRepair.java @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Ozone Repair CLI for RocksDB. + */ +@CommandLine.Command(name = "ldb", + description = "Operational tool to repair RocksDB table.") +@MetaInfServices(SubcommandWithParent.class) +public class RDBRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Database File Path") + private String dbPath; + + public String getDbPath() { + return dbPath; + } + + @Override + public Void call() { + GenericCli.missingSubcommand(spec); + return null; + } + + @Override + public Class getParentType() { + return OzoneRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java new file mode 100644 index 000000000000..ec5e2f8f9366 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java @@ -0,0 +1,200 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.repair.om; + +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.StringCodec; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.repair.RDBRepair; +import org.apache.hadoop.ozone.shell.bucket.BucketUri; +import org.kohsuke.MetaInfServices; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import org.rocksdb.RocksDBException; +import picocli.CommandLine; +import picocli.CommandLine.Model.CommandSpec; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.Callable; + +import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; +import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE; + +/** + * Tool to repair snapshotInfoTable in case it has corrupted entries. + */ +@CommandLine.Command( + name = "snapshot", + description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." +) +@MetaInfServices(SubcommandWithParent.class) +public class SnapshotRepair implements Callable, SubcommandWithParent { + + @CommandLine.Spec + private static CommandSpec spec; + + @CommandLine.ParentCommand + private RDBRepair parent; + + @CommandLine.Mixin + private BucketUri bucketUri; + + @CommandLine.Parameters(description = "Snapshot name to update", index = "1") + private String snapshotName; + + @CommandLine.Option(names = {"--global-previous", "--gp"}, + required = true, + description = "Global previous snapshotId to set for the given snapshot") + private UUID globalPreviousSnapshotId; + + @CommandLine.Option(names = {"--path-previous", "--pp"}, + required = true, + description = "Path previous snapshotId to set for the given snapshot") + private UUID pathPreviousSnapshotId; + + @CommandLine.Option(names = {"--dry-run"}, + required = true, + description = "To dry-run the command.", defaultValue = "true") + private boolean dryRun; + + @Override + public Void call() throws Exception { + List cfHandleList = new ArrayList<>(); + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); + + try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) { + ColumnFamilyHandle snapshotInfoCfh = getSnapshotInfoCfh(cfHandleList); + if (snapshotInfoCfh == null) { + System.err.println(SNAPSHOT_INFO_TABLE + " is not in a column family in DB for the given path."); + return null; + } + + String snapshotInfoTableKey = SnapshotInfo.getTableKey(bucketUri.getValue().getVolumeName(), + bucketUri.getValue().getBucketName(), snapshotName); + + SnapshotInfo snapshotInfo = getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey); + if (snapshotInfo == null) { + System.err.println(snapshotName + " does not exist for given bucketUri: " + OM_KEY_PREFIX + + bucketUri.getValue().getVolumeName() + OM_KEY_PREFIX + bucketUri.getValue().getBucketName()); + return null; + } + + // snapshotIdSet is the set of the all existed snapshots ID to make that the provided global previous and path + // previous exist and after 
the update snapshot does not point to ghost snapshot. + Set snapshotIdSet = getSnapshotIdSet(db, snapshotInfoCfh); + + if (Objects.equals(snapshotInfo.getSnapshotId(), globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (Objects.equals(snapshotInfo.getSnapshotId(), pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); + return null; + } + + if (!snapshotIdSet.contains(globalPreviousSnapshotId)) { + System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + if (!snapshotIdSet.contains(pathPreviousSnapshotId)) { + System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + + "' does not exist in snapshotInfoTable."); + return null; + } + + snapshotInfo.setGlobalPreviousSnapshotId(globalPreviousSnapshotId); + snapshotInfo.setPathPreviousSnapshotId(pathPreviousSnapshotId); + + if (dryRun) { + System.out.println("SnapshotInfo would be updated to : " + snapshotInfo); + } else { + byte[] snapshotInfoBytes = SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); + db.get() + .put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes); + + System.out.println("Snapshot Info is updated to : " + + getSnapshotInfo(db, snapshotInfoCfh, snapshotInfoTableKey)); + } + } catch (RocksDBException exception) { + System.err.println("Failed to update the RocksDB for the given path: " + parent.getDbPath()); + System.err.println( + "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host."); + System.err.println(exception); + } finally { + IOUtils.closeQuietly(cfHandleList); + } + + return null; + } + + private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh) + throws IOException { + Set snapshotIdSet = new HashSet<>(); + try (ManagedRocksIterator iterator = new ManagedRocksIterator(db.get().newIterator(snapshotInfoCfh))) { + iterator.get().seekToFirst(); + + while (iterator.get().isValid()) { + SnapshotInfo snapshotInfo = SnapshotInfo.getCodec().fromPersistedFormat(iterator.get().value()); + snapshotIdSet.add(snapshotInfo.getSnapshotId()); + iterator.get().next(); + } + } + return snapshotIdSet; + } + + private ColumnFamilyHandle getSnapshotInfoCfh(List cfHandleList) throws RocksDBException { + byte[] nameBytes = SNAPSHOT_INFO_TABLE.getBytes(StandardCharsets.UTF_8); + + for (ColumnFamilyHandle cf : cfHandleList) { + if (Arrays.equals(cf.getName(), nameBytes)) { + return cf; + } + } + + return null; + } + + private SnapshotInfo getSnapshotInfo(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh, String snapshotInfoLKey) + throws IOException, RocksDBException { + byte[] bytes = db.get().get(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoLKey)); + return bytes != null ? 
SnapshotInfo.getCodec().fromPersistedFormat(bytes) : null; + } + + @Override + public Class getParentType() { + return RDBRepair.class; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java new file mode 100644 index 000000000000..9e2324a4a6f8 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * OM related repair tools. + */ +package org.apache.hadoop.ozone.repair.om; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java new file mode 100644 index 000000000000..bd382d04cf79 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Ozone Repair tools. + */ +package org.apache.hadoop.ozone.repair; From 6a31ccd5011aad703473e48d7f623f9a6bcbf673 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 20 Mar 2024 17:43:55 +0800 Subject: [PATCH 03/17] HDDS-10552. 
Downgrade Surefire to 3.0.0-M4 (#6406) (cherry picked from commit ca8f1f76b20d4f38b5e939494bb49b1fc4181c9e) --- hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml | 5 ----- hadoop-ozone/integration-test/pom.xml | 5 ----- pom.xml | 2 +- 3 files changed, 1 insertion(+), 11 deletions(-) diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 3d351f390d56..c993ba32423e 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -43,11 +43,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-jupiter-migrationsupport test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.vintage junit-vintage-engine diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index 62b0f1cc7155..b3241c6c98b2 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -139,11 +139,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> junit-jupiter-migrationsupport test - - org.junit.jupiter - junit-jupiter-engine - test - org.junit.vintage junit-vintage-engine diff --git a/pom.xml b/pom.xml index f913338f2c8f..5afd82cbc2ef 100644 --- a/pom.xml +++ b/pom.xml @@ -257,7 +257,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs -Xmx4096m -XX:+HeapDumpOnOutOfMemoryError flaky | org.apache.ozone.test.FlakyTest | slow | org.apache.ozone.test.SlowTest | unhealthy | org.apache.ozone.test.UnhealthyTest - 3.0.0-M5 + 3.0.0-M4 ${maven-surefire-plugin.version} ${maven-surefire-plugin.version} From fe73d7099cb64cc90c0cadc35580df06715f4a3a Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Thu, 21 Mar 2024 21:31:50 +0800 Subject: [PATCH 04/17] HDDS-10477. Make Rocksdb tools native lib compatible with all chipset with the same arch (#6341) (cherry picked from commit e3a7224a81101a6d2dee5e036c3e98e3a2e8ca49) --- hadoop-hdds/rocks-native/pom.xml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index a7b1c367ac52..1b29bfcbd6af 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -325,7 +325,8 @@ - + + From 2f002cdf9c6cd182f391405ecdc4eb9b72934783 Mon Sep 17 00:00:00 2001 From: Aswin Shakil Balasubramanian Date: Sat, 27 Jan 2024 07:19:22 -0800 Subject: [PATCH 05/17] HDDS-9426. 
Calculate Exclusive size for deep cleaned snapshot's deleted directories (#6099) (cherry picked from commit 79d3c873f5cb69825d042f533033f94caa06717d) --- .../src/main/resources/ozone-default.xml | 19 + .../apache/hadoop/ozone/om/OMConfigKeys.java | 12 + .../hadoop/ozone/om/helpers/SnapshotInfo.java | 45 +- .../ozone/om/helpers/TestOmSnapshotInfo.java | 11 +- .../TestSnapshotDirectoryCleaningService.java | 272 +++++++++ .../src/main/proto/OmClientProtocol.proto | 23 +- .../apache/hadoop/ozone/om/KeyManager.java | 7 + .../hadoop/ozone/om/KeyManagerImpl.java | 30 + .../snapshot/OMSnapshotPurgeRequest.java | 21 +- .../OMSnapshotSetPropertyRequest.java | 35 +- .../service/AbstractKeyDeletingService.java | 100 ++++ .../ozone/om/service/KeyDeletingService.java | 151 ++--- .../SnapshotDirectoryCleaningService.java | 515 ++++++++++++++++++ ...SnapshotSetPropertyRequestAndResponse.java | 8 +- .../om/service/TestKeyDeletingService.java | 9 +- 15 files changed, 1098 insertions(+), 160 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 2d410fecd96d..c70a9630f039 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3532,6 +3532,25 @@ + + ozone.snapshot.directory.service.timeout + 300s + OZONE, PERFORMANCE, OM + + Timeout value for SnapshotDirectoryCleaningService. + + + + + ozone.snapshot.directory.service.interval + 24h + OZONE, PERFORMANCE, OM + + The time interval between successive SnapshotDirectoryCleaningService + thread run. + + + ozone.scm.event.ContainerReport.thread.pool.size 10 diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 58f341b74aca..5dd7579eb916 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -390,6 +390,18 @@ private OMConfigKeys() { public static final String OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT = "60s"; + /** + * Configuration properties for Snapshot Directory Service. 
+ */ + public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL = + "ozone.snapshot.directory.service.interval"; + public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT + = "24h"; + public static final String OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT = + "ozone.snapshot.directory.service.timeout"; + public static final String + OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT = "300s"; + public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK = "ozone.path.deleting.limit.per.task"; // default is 6000 taking account of 32MB buffer size, and assuming diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 55e3e668df11..49e85a4c7201 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -123,6 +123,7 @@ public static SnapshotStatus valueOf(SnapshotStatusProto status) { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; + private boolean deepCleanedDeletedDir; /** * Private constructor, constructed via builder. @@ -162,7 +163,8 @@ private SnapshotInfo(UUID snapshotId, long referencedSize, long referencedReplicatedSize, long exclusiveSize, - long exclusiveReplicatedSize) { + long exclusiveReplicatedSize, + boolean deepCleanedDeletedDir) { this.snapshotId = snapshotId; this.name = name; this.volumeName = volumeName; @@ -181,6 +183,7 @@ private SnapshotInfo(UUID snapshotId, this.referencedReplicatedSize = referencedReplicatedSize; this.exclusiveSize = exclusiveSize; this.exclusiveReplicatedSize = exclusiveReplicatedSize; + this.deepCleanedDeletedDir = deepCleanedDeletedDir; } public void setName(String name) { @@ -285,7 +288,7 @@ public void setSstFiltered(boolean sstFiltered) { } public SnapshotInfo.Builder toBuilder() { - return new SnapshotInfo.Builder() + return new Builder() .setSnapshotId(snapshotId) .setName(name) .setVolumeName(volumeName) @@ -302,7 +305,8 @@ public SnapshotInfo.Builder toBuilder() { .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize); + .setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setDeepCleanedDeletedDir(deepCleanedDeletedDir); } /** @@ -327,6 +331,7 @@ public static class Builder { private long referencedReplicatedSize; private long exclusiveSize; private long exclusiveReplicatedSize; + private boolean deepCleanedDeletedDir; public Builder() { // default values @@ -423,6 +428,11 @@ public Builder setExclusiveReplicatedSize(long exclusiveReplicatedSize) { return this; } + public Builder setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { + this.deepCleanedDeletedDir = deepCleanedDeletedDir; + return this; + } + public SnapshotInfo build() { Preconditions.checkNotNull(name); return new SnapshotInfo( @@ -443,7 +453,8 @@ public SnapshotInfo build() { referencedSize, referencedReplicatedSize, exclusiveSize, - exclusiveReplicatedSize + exclusiveReplicatedSize, + deepCleanedDeletedDir ); } } @@ -465,7 +476,8 @@ public OzoneManagerProtocolProtos.SnapshotInfo getProtobuf() { .setReferencedSize(referencedSize) .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) - .setExclusiveReplicatedSize(exclusiveReplicatedSize); + 
.setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setDeepCleanedDeletedDir(deepCleanedDeletedDir); if (pathPreviousSnapshotId != null) { sib.setPathPreviousSnapshotID(toProtobuf(pathPreviousSnapshotId)); @@ -538,6 +550,11 @@ public static SnapshotInfo getFromProtobuf( snapshotInfoProto.getExclusiveReplicatedSize()); } + if (snapshotInfoProto.hasDeepCleanedDeletedDir()) { + osib.setDeepCleanedDeletedDir( + snapshotInfoProto.getDeepCleanedDeletedDir()); + } + osib.setSnapshotPath(snapshotInfoProto.getSnapshotPath()) .setCheckpointDir(snapshotInfoProto.getCheckpointDir()) .setDbTxSequenceNumber(snapshotInfoProto.getDbTxSequenceNumber()); @@ -622,6 +639,14 @@ public long getExclusiveReplicatedSize() { return exclusiveReplicatedSize; } + public boolean getDeepCleanedDeletedDir() { + return deepCleanedDeletedDir; + } + + public void setDeepCleanedDeletedDir(boolean deepCleanedDeletedDir) { + this.deepCleanedDeletedDir = deepCleanedDeletedDir; + } + /** * Generate default name of snapshot, (used if user doesn't provide one). */ @@ -655,7 +680,8 @@ public static SnapshotInfo newInstance(String volumeName, .setSnapshotPath(volumeName + OM_KEY_PREFIX + bucketName) .setVolumeName(volumeName) .setBucketName(bucketName) - .setDeepClean(true); + .setDeepClean(false) + .setDeepCleanedDeletedDir(false); if (snapshotId != null) { builder.setCheckpointDir(getCheckpointDirName(snapshotId)); @@ -688,7 +714,8 @@ public boolean equals(Object o) { referencedSize == that.referencedSize && referencedReplicatedSize == that.referencedReplicatedSize && exclusiveSize == that.exclusiveSize && - exclusiveReplicatedSize == that.exclusiveReplicatedSize; + exclusiveReplicatedSize == that.exclusiveReplicatedSize && + deepCleanedDeletedDir == that.deepCleanedDeletedDir; } @Override @@ -699,7 +726,7 @@ public int hashCode() { globalPreviousSnapshotId, snapshotPath, checkpointDir, deepClean, sstFiltered, referencedSize, referencedReplicatedSize, - exclusiveSize, exclusiveReplicatedSize); + exclusiveSize, exclusiveReplicatedSize, deepCleanedDeletedDir); } /** @@ -726,6 +753,7 @@ public SnapshotInfo copyObject() { .setReferencedReplicatedSize(referencedReplicatedSize) .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveReplicatedSize) + .setDeepCleanedDeletedDir(deepCleanedDeletedDir) .build(); } @@ -750,6 +778,7 @@ public String toString() { ", referencedReplicatedSize: '" + referencedReplicatedSize + '\'' + ", exclusiveSize: '" + exclusiveSize + '\'' + ", exclusiveReplicatedSize: '" + exclusiveReplicatedSize + '\'' + + ", deepCleanedDeletedDir: '" + deepCleanedDeletedDir + '\'' + '}'; } } diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java index ed3f96efb912..50fef3ffd837 100644 --- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java +++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmSnapshotInfo.java @@ -66,12 +66,13 @@ private SnapshotInfo createSnapshotInfo() { .setSnapshotPath(SNAPSHOT_PATH) .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) - .setDeepClean(true) + .setDeepClean(false) .setSstFiltered(false) .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) + .setDeepCleanedDeletedDir(false) .build(); } @@ -89,12 +90,13 @@ private OzoneManagerProtocolProtos.SnapshotInfo 
createSnapshotInfoProto() { .setSnapshotPath(SNAPSHOT_PATH) .setCheckpointDir(CHECKPOINT_DIR) .setDbTxSequenceNumber(DB_TX_SEQUENCE_NUMBER) - .setDeepClean(true) + .setDeepClean(false) .setSstFiltered(false) .setReferencedSize(2000L) .setReferencedReplicatedSize(6000L) .setExclusiveSize(1000L) .setExclusiveReplicatedSize(3000L) + .setDeepCleanedDeletedDir(false) .build(); } @@ -140,6 +142,9 @@ public void testSnapshotInfoToProto() { Assertions.assertEquals( snapshotInfoEntryExpected.getExclusiveReplicatedSize(), snapshotInfoEntryActual.getExclusiveReplicatedSize()); + Assertions.assertEquals( + snapshotInfoEntryExpected.getDeepCleanedDeletedDir(), + snapshotInfoEntryActual.getDeepCleanedDeletedDir()); Assertions.assertEquals(snapshotInfoEntryExpected, snapshotInfoEntryActual); } @@ -176,6 +181,8 @@ public void testSnapshotInfoProtoToSnapshotInfo() { snapshotInfoActual.getExclusiveSize()); Assertions.assertEquals(snapshotInfoExpected.getExclusiveReplicatedSize(), snapshotInfoActual.getExclusiveReplicatedSize()); + Assertions.assertEquals(snapshotInfoExpected.getDeepCleanedDeletedDir(), + snapshotInfoActual.getDeepCleanedDeletedDir()); Assertions.assertEquals(snapshotInfoExpected, snapshotInfoActual); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java new file mode 100644 index 000000000000..6b39b76c5466 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSnapshotDirectoryCleaningService.java @@ -0,0 +1,272 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.TestDataUtil; +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; +import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.fail; + +/** + * Test Snapshot Directory Service. 
+ */ +@Timeout(300) +public class TestSnapshotDirectoryCleaningService { + + private static final Logger LOG = + LoggerFactory.getLogger(TestSnapshotDirectoryCleaningService.class); + + private static MiniOzoneCluster cluster; + private static FileSystem fs; + private static String volumeName; + private static String bucketName; + private static OzoneClient client; + + @BeforeAll + public static void init() throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + conf.setInt(OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, 2500); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 2500, + TimeUnit.MILLISECONDS); + conf.setBoolean(OZONE_ACL_ENABLED, true); + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(3) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + + // create a volume and a bucket to be used by OzoneFileSystem + OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client, + BucketLayout.FILE_SYSTEM_OPTIMIZED); + volumeName = bucket.getVolumeName(); + bucketName = bucket.getName(); + + String rootPath = String.format("%s://%s.%s/", + OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName); + + // Set the fs.defaultFS and start the filesystem + conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + // Set the number of keys to be processed during batch operate. + conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); + + fs = FileSystem.get(conf); + } + + @AfterAll + public static void teardown() { + IOUtils.closeQuietly(client); + if (cluster != null) { + cluster.shutdown(); + } + IOUtils.closeQuietly(fs); + } + + @AfterEach + public void cleanup() { + try { + Path root = new Path("/"); + FileStatus[] fileStatuses = fs.listStatus(root); + for (FileStatus fileStatus : fileStatuses) { + fs.delete(fileStatus.getPath(), true); + } + } catch (IOException ex) { + fail("Failed to cleanup files."); + } + } + + @SuppressWarnings("checkstyle:LineLength") + @Test + public void testExclusiveSizeWithDirectoryDeepClean() throws Exception { + + Table deletedDirTable = + cluster.getOzoneManager().getMetadataManager().getDeletedDirTable(); + Table keyTable = + cluster.getOzoneManager().getMetadataManager() + .getKeyTable(BucketLayout.FILE_SYSTEM_OPTIMIZED); + Table dirTable = + cluster.getOzoneManager().getMetadataManager().getDirectoryTable(); + Table deletedKeyTable = + cluster.getOzoneManager().getMetadataManager().getDeletedTable(); + Table snapshotInfoTable = + cluster.getOzoneManager().getMetadataManager().getSnapshotInfoTable(); + SnapshotDirectoryCleaningService snapshotDirectoryCleaningService = + cluster.getOzoneManager().getKeyManager().getSnapshotDirectoryService(); + + /* DirTable + /v/b/snapDir + /v/b/snapDir/appRoot0-2/ + /v/b/snapDir/appRoot0-2/parentDir0-2/ + FileTable + /v/b/snapDir/testKey0 - testKey4 = 5 keys + /v/b/snapDir/appRoot0-2/parentDir0-2/childFile = 9 keys + /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 = 15 keys + */ + + Path root = new Path("/snapDir"); + // Create parent dir from root. 
+ fs.mkdirs(root); + + // Add 5 files inside root dir + // Creates /v/b/snapDir/testKey0 - testKey4 + for (int i = 0; i < 5; i++) { + Path path = new Path(root, "testKey" + i); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + } + + // Creates /v/b/snapDir/appRoot0-2/parentDir0-2/childFile + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + Path appRoot = new Path(root, "appRoot" + j); + Path parent = new Path(appRoot, "parentDir" + i); + Path child = new Path(parent, "childFile"); + try (FSDataOutputStream stream = fs.create(child)) { + stream.write(1); + } + } + } + + assertTableRowCount(keyTable, 14); + assertTableRowCount(dirTable, 13); + // Create snapshot + client.getObjectStore().createSnapshot(volumeName, bucketName, "snap1"); + + // Creates /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 + for (int i = 0; i < 3; i++) { + Path appRoot = new Path(root, "appRoot0"); + Path parent = new Path(appRoot, "parentDir" + i); + for (int j = 0; j < 5; j++) { + Path child = new Path(parent, "childFile" + j); + try (FSDataOutputStream stream = fs.create(child)) { + stream.write(1); + } + } + } + + for (int i = 5; i < 10; i++) { + Path path = new Path(root, "testKey" + i); + try (FSDataOutputStream stream = fs.create(path)) { + stream.write(1); + } + } + + assertTableRowCount(deletedDirTable, 0); + assertTableRowCount(keyTable, 34); + assertTableRowCount(dirTable, 13); + Path appRoot0 = new Path(root, "appRoot0"); + // Only parentDir0-2/childFile under appRoot0 is exclusive for snap1 + fs.delete(appRoot0, true); + assertTableRowCount(deletedDirTable, 1); + client.getObjectStore().createSnapshot(volumeName, bucketName, "snap2"); + + // Delete testKey0-9 + for (int i = 0; i < 10; i++) { + Path testKey = new Path(root, "testKey" + i); + fs.delete(testKey, false); + } + + fs.delete(root, true); + assertTableRowCount(deletedKeyTable, 10); + client.getObjectStore().createSnapshot(volumeName, bucketName, "snap3"); + long prevRunCount = snapshotDirectoryCleaningService.getRunCount().get(); + GenericTestUtils.waitFor(() -> snapshotDirectoryCleaningService.getRunCount().get() + > prevRunCount + 1, 100, 10000); + + Thread.sleep(2000); + Map expectedSize = new HashMap() {{ + // /v/b/snapDir/appRoot0/parentDir0-2/childFile contribute + // exclusive size, /v/b/snapDir/appRoot0/parentDir0-2/childFile0-4 + // are deep cleaned and hence don't contribute to size. + put("snap1", 3L); + // Only testKey5-9 contribute to the exclusive size + put("snap2", 5L); + put("snap3", 0L); + }}; + Thread.sleep(500); + try (TableIterator> + iterator = snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + Table.KeyValue snapshotEntry = iterator.next(); + String snapshotName = snapshotEntry.getValue().getName(); + assertEquals(expectedSize.get(snapshotName), snapshotEntry.getValue(). 
+ getExclusiveSize()); + // Since for the test we are using RATIS/THREE + assertEquals(expectedSize.get(snapshotName) * 3, + snapshotEntry.getValue().getExclusiveReplicatedSize()); + + } + } + } + + private void assertTableRowCount(Table table, int count) + throws TimeoutException, InterruptedException { + GenericTestUtils.waitFor(() -> assertTableRowCount(count, table), 1000, + 120000); // 2 minutes + } + + private boolean assertTableRowCount(int expectedCount, + Table table) { + long count = 0L; + try { + count = cluster.getOzoneManager().getMetadataManager() + .countRowsInTable(table); + LOG.info("{} actual row count={}, expectedCount={}", table.getName(), + count, expectedCount); + } catch (IOException ex) { + fail("testDoubleBuffer failed with: " + ex); + } + return count == expectedCount; + } +} diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index 9efb64a2a4d2..8b931c49c961 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -522,7 +522,7 @@ enum Status { UNAUTHORIZED = 91; S3_SECRET_ALREADY_EXISTS = 92; - + INVALID_PATH = 93; TOO_MANY_BUCKETS = 94; @@ -851,6 +851,7 @@ message SnapshotInfo { // snapshot exclusive size after replication optional uint64 exclusiveReplicatedSize = 18; // note: shared sizes can be calculated from: referenced - exclusive + optional bool deepCleanedDeletedDir = 19; } message SnapshotDiffJobProto { @@ -1894,17 +1895,27 @@ message SnapshotMoveKeyInfos { message SnapshotPurgeRequest { repeated string snapshotDBKeys = 1; - repeated string updatedSnapshotDBKey = 2; + repeated string updatedSnapshotDBKey = 2 [deprecated = true]; } message SetSnapshotPropertyRequest { - optional SnapshotProperty snapshotProperty = 1; + optional SnapshotProperty snapshotProperty = 1 [deprecated = true]; + optional string snapshotKey = 2; + optional SnapshotSize snapshotSize = 3; + optional bool deepCleanedDeletedDir = 4; + optional bool deepCleanedDeletedKey = 5; } +// SnapshotProperty in entirely deprecated, Keeping it here for proto.lock compatibility message SnapshotProperty { - optional string snapshotKey = 1; - optional uint64 exclusiveSize = 2; - optional uint64 exclusiveReplicatedSize = 3; + optional string snapshotKey = 1 [deprecated = true]; + optional uint64 exclusiveSize = 2 [deprecated = true]; + optional uint64 exclusiveReplicatedSize = 3 [deprecated = true]; +} + +message SnapshotSize { + optional uint64 exclusiveSize = 1; + optional uint64 exclusiveReplicatedSize = 2; } message DeleteTenantRequest { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java index 0fe1cdbe8031..4378701426c2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java @@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; import org.apache.hadoop.ozone.om.service.KeyDeletingService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; +import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import java.io.IOException; @@ -285,4 +286,10 @@ List getPendingDeletionSubFiles(long 
volumeId, * @return Background service. */ SnapshotDeletingService getSnapshotDeletingService(); + + /** + * Returns the instance of Snapshot Directory service. + * @return Background service. + */ + SnapshotDirectoryCleaningService getSnapshotDirectoryService(); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index c54750fee7dc..fdfb2e12a0e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -88,6 +88,7 @@ import org.apache.hadoop.ozone.om.service.MultipartUploadCleanupService; import org.apache.hadoop.ozone.om.service.OpenKeyCleanupService; import org.apache.hadoop.ozone.om.service.SnapshotDeletingService; +import org.apache.hadoop.ozone.om.service.SnapshotDirectoryCleaningService; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager; @@ -134,6 +135,10 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_CLEANUP_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.om.OzoneManagerUtils.getBucketLayout; @@ -184,6 +189,7 @@ public class KeyManagerImpl implements KeyManager { private BackgroundService openKeyCleanupService; private BackgroundService multipartUploadCleanupService; + private SnapshotDirectoryCleaningService snapshotDirectoryCleaningService; public KeyManagerImpl(OzoneManager om, ScmClient scmClient, OzoneConfiguration conf, OMPerformanceMetrics metrics) { @@ -303,6 +309,22 @@ public void start(OzoneConfiguration configuration) { } } + if (snapshotDirectoryCleaningService == null && + ozoneManager.isFilesystemSnapshotEnabled()) { + long dirDeleteInterval = configuration.getTimeDuration( + OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL, + OZONE_SNAPSHOT_DIRECTORY_SERVICE_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + long serviceTimeout = configuration.getTimeDuration( + OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT, + OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + snapshotDirectoryCleaningService = new SnapshotDirectoryCleaningService( + dirDeleteInterval, TimeUnit.MILLISECONDS, serviceTimeout, + ozoneManager, scmClient.getBlockClient()); + snapshotDirectoryCleaningService.start(); + } + if (multipartUploadCleanupService == null) { long serviceInterval = configuration.getTimeDuration( OZONE_OM_MPU_CLEANUP_SERVICE_INTERVAL, @@ 
-349,6 +371,10 @@ public void stop() throws IOException { multipartUploadCleanupService.shutdown(); multipartUploadCleanupService = null; } + if (snapshotDirectoryCleaningService != null) { + snapshotDirectoryCleaningService.shutdown(); + snapshotDirectoryCleaningService = null; + } } private OmBucketInfo getBucketInfo(String volumeName, String bucketName) @@ -684,6 +710,10 @@ public SnapshotDeletingService getSnapshotDeletingService() { return snapshotDeletingService; } + public SnapshotDirectoryCleaningService getSnapshotDirectoryService() { + return snapshotDirectoryCleaningService; + } + public boolean isSstFilteringSvcEnabled() { long serviceInterval = ozoneManager.getConfiguration() .getTimeDuration(OZONE_SNAPSHOT_SST_FILTERING_SERVICE_INTERVAL, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index d3ded0a9ae58..5abe08e55758 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -75,22 +75,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, try { List snapshotDbKeys = snapshotPurgeRequest .getSnapshotDBKeysList(); - List snapInfosToUpdate = snapshotPurgeRequest - .getUpdatedSnapshotDBKeyList(); Map updatedSnapInfos = new HashMap<>(); Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); - // Snapshots that are already deepCleaned by the KeyDeletingService - // can be marked as deepCleaned. - for (String snapTableKey : snapInfosToUpdate) { - SnapshotInfo snapInfo = omMetadataManager.getSnapshotInfoTable() - .get(snapTableKey); - - updateSnapshotInfoAndCache(snapInfo, omMetadataManager, - trxnLogIndex, updatedSnapInfos, false); - } - // Snapshots that are purged by the SnapshotDeletingService // will update the next snapshot so that is can be deep cleaned // by the KeyDeletingService in the next run. @@ -110,7 +98,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, snapshotChainManager, omSnapshotManager); updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos, true); + trxnLogIndex, updatedSnapInfos); updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); // Remove and close snapshot's RocksDB instance from SnapshotCache. @@ -134,9 +122,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos, boolean deepClean) { + Map updatedSnapInfos) { if (snapInfo != null) { - snapInfo.setDeepClean(deepClean); + // Setting next snapshot deep clean to false, Since the + // current snapshot is deleted. We can potentially + // reclaim more keys in the next snapshot. 
+ snapInfo.setDeepClean(false); // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index 32b3c87f919a..35f91a13f7bb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -29,7 +29,7 @@ import org.apache.hadoop.ozone.om.response.snapshot.OMSnapshotSetPropertyResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,16 +60,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OzoneManagerProtocolProtos.SetSnapshotPropertyRequest setSnapshotPropertyRequest = getOmRequest() .getSetSnapshotPropertyRequest(); - - SnapshotProperty snapshotProperty = setSnapshotPropertyRequest - .getSnapshotProperty(); SnapshotInfo updatedSnapInfo = null; try { - String snapshotKey = snapshotProperty.getSnapshotKey(); - long exclusiveSize = snapshotProperty.getExclusiveSize(); - long exclusiveReplicatedSize = snapshotProperty - .getExclusiveReplicatedSize(); + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); @@ -79,9 +73,28 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, " is not found", INVALID_SNAPSHOT_ERROR); } - // Set Exclusive size. - updatedSnapInfo.setExclusiveSize(exclusiveSize); - updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize); + if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { + updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest + .getDeepCleanedDeletedDir()); + } + + if (setSnapshotPropertyRequest.hasDeepCleanedDeletedKey()) { + updatedSnapInfo.setDeepClean(setSnapshotPropertyRequest + .getDeepCleanedDeletedKey()); + } + + if (setSnapshotPropertyRequest.hasSnapshotSize()) { + SnapshotSize snapshotSize = setSnapshotPropertyRequest + .getSnapshotSize(); + long exclusiveSize = updatedSnapInfo.getExclusiveSize() + + snapshotSize.getExclusiveSize(); + long exclusiveReplicatedSize = updatedSnapInfo + .getExclusiveReplicatedSize() + snapshotSize + .getExclusiveReplicatedSize(); + // Set Exclusive size. 
+ updatedSnapInfo.setExclusiveSize(exclusiveSize); + updatedSnapInfo.setExclusiveReplicatedSize(exclusiveReplicatedSize); + } // Update Table Cache metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(snapshotKey), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 1091053ebdc4..21ad0872769a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -469,6 +469,106 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, return remainNum; } + /** + * To calculate Exclusive Size for current snapshot, Check + * the next snapshot deletedTable if the deleted key is + * referenced in current snapshot and not referenced in the + * previous snapshot then that key is exclusive to the current + * snapshot. Here since we are only iterating through + * deletedTable we can check the previous and previous to + * previous snapshot to achieve the same. + * previousSnapshot - Snapshot for which exclusive size is + * getting calculating. + * currSnapshot - Snapshot's deletedTable is used to calculate + * previousSnapshot snapshot's exclusive size. + * previousToPrevSnapshot - Snapshot which is used to check + * if key is exclusive to previousSnapshot. + */ + @SuppressWarnings("checkstyle:ParameterNumber") + public void calculateExclusiveSize( + SnapshotInfo previousSnapshot, + SnapshotInfo previousToPrevSnapshot, + OmKeyInfo keyInfo, + OmBucketInfo bucketInfo, long volumeId, + Table snapRenamedTable, + Table previousKeyTable, + Table prevRenamedTable, + Table previousToPrevKeyTable, + Map exclusiveSizeMap, + Map exclusiveReplicatedSizeMap) throws IOException { + String prevSnapKey = previousSnapshot.getTableKey(); + long exclusiveReplicatedSize = + exclusiveReplicatedSizeMap.getOrDefault( + prevSnapKey, 0L) + keyInfo.getReplicatedSize(); + long exclusiveSize = exclusiveSizeMap.getOrDefault( + prevSnapKey, 0L) + keyInfo.getDataSize(); + + // If there is no previous to previous snapshot, then + // the previous snapshot is the first snapshot. + if (previousToPrevSnapshot == null) { + exclusiveSizeMap.put(prevSnapKey, exclusiveSize); + exclusiveReplicatedSizeMap.put(prevSnapKey, + exclusiveReplicatedSize); + } else { + OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName( + keyInfo, bucketInfo, volumeId, + snapRenamedTable, previousKeyTable); + OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName( + keyInfoPrevSnapshot, bucketInfo, volumeId, + prevRenamedTable, previousToPrevKeyTable); + // If the previous to previous snapshot doesn't + // have the key, then it is exclusive size for the + // previous snapshot. 
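      // Worked example (snapshot names made up): for a chain
      //   snapA -> snapB -> snapC,
      // a key read from snapC's deletedTable is passed in with snapB as
      // previousSnapshot and snapA as previousToPrevSnapshot. If the key
      // cannot be resolved in snapA's key table (keyInfoPrevToPrevSnapshot is
      // null below), its size is credited to snapB's entries in
      // exclusiveSizeMap and exclusiveReplicatedSizeMap.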
+ if (keyInfoPrevToPrevSnapshot == null) { + exclusiveSizeMap.put(prevSnapKey, exclusiveSize); + exclusiveReplicatedSizeMap.put(prevSnapKey, + exclusiveReplicatedSize); + } + } + } + + private OmKeyInfo getPreviousSnapshotKeyName( + OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, + Table snapRenamedTable, + Table previousKeyTable) throws IOException { + + if (keyInfo == null) { + return null; + } + + String dbKeyPrevSnap; + if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( + volumeId, + bucketInfo.getObjectID(), + keyInfo.getParentObjectID(), + keyInfo.getFileName()); + } else { + dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( + keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getKeyName()); + } + + String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( + keyInfo.getVolumeName(), + keyInfo.getBucketName(), + keyInfo.getObjectID()); + + String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); + OmKeyInfo prevKeyInfo = renamedKey != null ? + previousKeyTable.get(renamedKey) : + previousKeyTable.get(dbKeyPrevSnap); + + if (prevKeyInfo == null || + prevKeyInfo.getObjectID() != keyInfo.getObjectID()) { + return null; + } + + return isBlockLocationInfoSame(prevKeyInfo, keyInfo) ? + prevKeyInfo : null; + } + protected boolean isBufferLimitCrossed( int maxLimit, int cLimit, int increment) { return cLimit + increment >= maxLimit; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 6dcc2544b4da..e89608e82db2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -48,8 +48,7 @@ import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotPurgeRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; import org.apache.hadoop.hdds.utils.BackgroundTask; @@ -98,6 +97,7 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private final Map exclusiveSizeMap; private final Map exclusiveReplicatedSizeMap; private final Set completedExclusiveSizeSet; + private final Map snapshotSeekMap; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, @@ -116,6 +116,7 @@ public KeyDeletingService(OzoneManager ozoneManager, this.exclusiveSizeMap = new HashMap<>(); this.exclusiveReplicatedSizeMap = new HashMap<>(); this.completedExclusiveSizeSet = new HashSet<>(); + this.snapshotSeekMap = new HashMap<>(); } /** @@ -258,8 +259,8 @@ private void processSnapshotDeepClean(int delCount) // Deep clean only on active snapshot. Deleted Snapshots will be // cleaned up by SnapshotDeletingService. 
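        // Note on the flipped condition below: the deepClean flag now records
        // that the snapshot's deleted keys have already been deep cleaned. It
        // starts out false, updateDeepCleanedSnapshots() sets it to true via a
        // SetSnapshotProperty request once the snapshot's deletedTable has
        // been fully processed, and a snapshot purge resets it to false so the
        // next snapshot in the chain is revisited.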
- if (!currSnapInfo.getSnapshotStatus().equals(SNAPSHOT_ACTIVE) || - !currSnapInfo.getDeepClean()) { + if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + currSnapInfo.getDeepClean()) { continue; } @@ -342,11 +343,22 @@ private void processSnapshotDeepClean(int delCount) RepeatedOmKeyInfo>> deletedIterator = snapDeletedTable .iterator()) { - deletedIterator.seek(snapshotBucketKey); + String lastKeyInCurrentRun = null; + String deletedTableSeek = snapshotSeekMap.getOrDefault( + currSnapInfo.getTableKey(), snapshotBucketKey); + deletedIterator.seek(deletedTableSeek); + // To avoid processing the last key from the previous + // run again. + if (!deletedTableSeek.equals(snapshotBucketKey) && + deletedIterator.hasNext()) { + deletedIterator.next(); + } + while (deletedIterator.hasNext() && delCount < keyLimitPerTask) { Table.KeyValue deletedKeyValue = deletedIterator.next(); String deletedKey = deletedKeyValue.getKey(); + lastKeyInCurrentRun = deletedKey; // Exit if it is out of the bucket scope. if (!deletedKey.startsWith(snapshotBucketKey)) { @@ -366,7 +378,8 @@ private void processSnapshotDeepClean(int delCount) calculateExclusiveSize(previousSnapshot, previousToPrevSnapshot, keyInfo, bucketInfo, volumeId, snapRenamedTable, previousKeyTable, prevRenamedTable, - previousToPrevKeyTable); + previousToPrevKeyTable, exclusiveSizeMap, + exclusiveReplicatedSizeMap); } if (isKeyReclaimable(previousKeyTable, snapRenamedTable, @@ -406,6 +419,15 @@ private void processSnapshotDeepClean(int delCount) completedExclusiveSizeSet.add( previousSnapshot.getTableKey()); } + + snapshotSeekMap.remove(currSnapInfo.getTableKey()); + } else { + // There are keys that still needs processing + // we can continue from it in the next iteration + if (lastKeyInCurrentRun != null) { + snapshotSeekMap.put(currSnapInfo.getTableKey(), + lastKeyInCurrentRun); + } } if (!keysToPurge.isEmpty()) { @@ -420,98 +442,8 @@ private void processSnapshotDeepClean(int delCount) } } - updateSnapshotExclusiveSize(); updateDeepCleanedSnapshots(deepCleanedSnapshots); - } - - /** - * To calculate Exclusive Size for current snapshot, Check - * the next snapshot deletedTable if the deleted key is - * referenced in current snapshot and not referenced in the - * previous snapshot then that key is exclusive to the current - * snapshot. Here since we are only iterating through - * deletedTable we can check the previous and previous to - * previous snapshot to achieve the same. - * previousSnapshot - Snapshot for which exclusive size is - * getting calculating. - * currSnapshot - Snapshot's deletedTable is used to calculate - * previousSnapshot snapshot's exclusive size. - * previousToPrevSnapshot - Snapshot which is used to check - * if key is exclusive to previousSnapshot. - */ - @SuppressWarnings("checkstyle:ParameterNumber") - private void calculateExclusiveSize( - SnapshotInfo previousSnapshot, - SnapshotInfo previousToPrevSnapshot, - OmKeyInfo keyInfo, - OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable, - Table prevRenamedTable, - Table previousToPrevKeyTable) throws IOException { - String prevSnapKey = previousSnapshot.getTableKey(); - long exclusiveReplicatedSize = - exclusiveReplicatedSizeMap.getOrDefault( - prevSnapKey, 0L) + keyInfo.getReplicatedSize(); - long exclusiveSize = exclusiveSizeMap.getOrDefault( - prevSnapKey, 0L) + keyInfo.getDataSize(); - - // If there is no previous to previous snapshot, then - // the previous snapshot is the first snapshot. 
- if (previousToPrevSnapshot == null) { - exclusiveSizeMap.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizeMap.put(prevSnapKey, - exclusiveReplicatedSize); - } else { - OmKeyInfo keyInfoPrevSnapshot = getPreviousSnapshotKeyName( - keyInfo, bucketInfo, volumeId, - snapRenamedTable, previousKeyTable); - OmKeyInfo keyInfoPrevToPrevSnapshot = getPreviousSnapshotKeyName( - keyInfoPrevSnapshot, bucketInfo, volumeId, - prevRenamedTable, previousToPrevKeyTable); - // If the previous to previous snapshot doesn't - // have the key, then it is exclusive size for the - // previous snapshot. - if (keyInfoPrevToPrevSnapshot == null) { - exclusiveSizeMap.put(prevSnapKey, exclusiveSize); - exclusiveReplicatedSizeMap.put(prevSnapKey, - exclusiveReplicatedSize); - } - } - } - - private OmKeyInfo getPreviousSnapshotKeyName( - OmKeyInfo keyInfo, OmBucketInfo bucketInfo, long volumeId, - Table snapRenamedTable, - Table previousKeyTable) throws IOException { - - if (keyInfo == null) { - return null; - } - - String dbKeyPrevSnap; - if (bucketInfo.getBucketLayout().isFileSystemOptimized()) { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzonePathKey( - volumeId, - bucketInfo.getObjectID(), - keyInfo.getParentObjectID(), - keyInfo.getFileName()); - } else { - dbKeyPrevSnap = getOzoneManager().getMetadataManager().getOzoneKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getKeyName()); - } - - String dbRenameKey = getOzoneManager().getMetadataManager().getRenameKey( - keyInfo.getVolumeName(), - keyInfo.getBucketName(), - keyInfo.getObjectID()); - - String renamedKey = snapRenamedTable.getIfExist(dbRenameKey); - dbKeyPrevSnap = renamedKey != null ? renamedKey : dbKeyPrevSnap; - - return previousKeyTable.get(dbKeyPrevSnap); + updateSnapshotExclusiveSize(); } private void updateSnapshotExclusiveSize() { @@ -525,15 +457,15 @@ private void updateSnapshotExclusiveSize() { while (completedSnapshotIterator.hasNext()) { ClientId clientId = ClientId.randomId(); String dbKey = completedSnapshotIterator.next(); - SnapshotProperty snapshotProperty = SnapshotProperty.newBuilder() - .setSnapshotKey(dbKey) - .setExclusiveSize(exclusiveSizeMap.get(dbKey)) + SnapshotSize snapshotSize = SnapshotSize.newBuilder() + .setExclusiveSize(exclusiveSizeMap.getOrDefault(dbKey, 0L)) .setExclusiveReplicatedSize( - exclusiveReplicatedSizeMap.get(dbKey)) + exclusiveReplicatedSizeMap.getOrDefault(dbKey, 0L)) .build(); SetSnapshotPropertyRequest setSnapshotPropertyRequest = SetSnapshotPropertyRequest.newBuilder() - .setSnapshotProperty(snapshotProperty) + .setSnapshotKey(dbKey) + .setSnapshotSize(snapshotSize) .build(); OMRequest omRequest = OMRequest.newBuilder() @@ -549,16 +481,17 @@ private void updateSnapshotExclusiveSize() { } private void updateDeepCleanedSnapshots(List deepCleanedSnapshots) { - if (!deepCleanedSnapshots.isEmpty()) { + for (String deepCleanedSnapshot: deepCleanedSnapshots) { ClientId clientId = ClientId.randomId(); - SnapshotPurgeRequest snapshotPurgeRequest = SnapshotPurgeRequest - .newBuilder() - .addAllUpdatedSnapshotDBKey(deepCleanedSnapshots) - .build(); + SetSnapshotPropertyRequest setSnapshotPropertyRequest = + SetSnapshotPropertyRequest.newBuilder() + .setSnapshotKey(deepCleanedSnapshot) + .setDeepCleanedDeletedKey(true) + .build(); OMRequest omRequest = OMRequest.newBuilder() - .setCmdType(Type.SnapshotPurge) - .setSnapshotPurgeRequest(snapshotPurgeRequest) + .setCmdType(Type.SetSnapshotProperty) + .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) 
.setClientId(clientId.toString()) .build(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java new file mode 100644 index 000000000000..9a60f6303861 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/SnapshotDirectoryCleaningService.java @@ -0,0 +1,515 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.service; + +import com.google.common.annotations.VisibleForTesting; +import com.google.protobuf.ServiceException; +import org.apache.commons.lang3.StringUtils; +import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.utils.BackgroundTask; +import org.apache.hadoop.hdds.utils.BackgroundTaskQueue; +import org.apache.hadoop.hdds.utils.BackgroundTaskResult; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.common.BlockGroup; +import org.apache.hadoop.ozone.om.IOmMetadataReader; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.OmSnapshot; +import org.apache.hadoop.ozone.om.OmSnapshotManager; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.SnapshotChainManager; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted; +import org.apache.hadoop.ozone.om.snapshot.SnapshotCache; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Stack; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPrefix; +import static org.apache.hadoop.ozone.om.helpers.SnapshotInfo.SnapshotStatus.SNAPSHOT_ACTIVE; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.getDirectoryInfo; +import static org.apache.hadoop.ozone.om.snapshot.SnapshotUtils.getOzonePathKeyForFso; + +/** + * Snapshot BG Service for deleted directory deep clean and exclusive size + * calculation for deleted directories. + */ +public class SnapshotDirectoryCleaningService + extends AbstractKeyDeletingService { + // Use only a single thread for DirDeletion. Multiple threads would read + // or write to same tables and can send deletion requests for same key + // multiple times. 
+ private static final int SNAPSHOT_DIR_CORE_POOL_SIZE = 1; + + private final AtomicBoolean suspended; + private final Map exclusiveSizeMap; + private final Map exclusiveReplicatedSizeMap; + + public SnapshotDirectoryCleaningService(long interval, TimeUnit unit, + long serviceTimeout, + OzoneManager ozoneManager, + ScmBlockLocationProtocol scmClient) { + super(SnapshotDirectoryCleaningService.class.getSimpleName(), + interval, unit, SNAPSHOT_DIR_CORE_POOL_SIZE, serviceTimeout, + ozoneManager, scmClient); + this.suspended = new AtomicBoolean(false); + this.exclusiveSizeMap = new HashMap<>(); + this.exclusiveReplicatedSizeMap = new HashMap<>(); + } + + private boolean shouldRun() { + if (getOzoneManager() == null) { + // OzoneManager can be null for testing + return true; + } + return getOzoneManager().isLeaderReady() && !suspended.get(); + } + + /** + * Suspend the service. + */ + @VisibleForTesting + public void suspend() { + suspended.set(true); + } + + /** + * Resume the service if suspended. + */ + @VisibleForTesting + public void resume() { + suspended.set(false); + } + + @Override + public BackgroundTaskQueue getTasks() { + BackgroundTaskQueue queue = new BackgroundTaskQueue(); + queue.add(new SnapshotDirectoryCleaningService.SnapshotDirTask()); + return queue; + } + + private class SnapshotDirTask implements BackgroundTask { + + @Override + public BackgroundTaskResult call() { + if (!shouldRun()) { + return BackgroundTaskResult.EmptyTaskResult.newResult(); + } + LOG.debug("Running SnapshotDirectoryCleaningService"); + + getRunCount().incrementAndGet(); + OmSnapshotManager omSnapshotManager = + getOzoneManager().getOmSnapshotManager(); + Table snapshotInfoTable = + getOzoneManager().getMetadataManager().getSnapshotInfoTable(); + OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) + getOzoneManager().getMetadataManager(); + SnapshotChainManager snapChainManager = metadataManager + .getSnapshotChainManager(); + + try (TableIterator> iterator = snapshotInfoTable.iterator()) { + + while (iterator.hasNext()) { + SnapshotInfo currSnapInfo = iterator.next().getValue(); + + // Expand deleted dirs only on active snapshot. Deleted Snapshots + // will be cleaned up by SnapshotDeletingService. + if (currSnapInfo.getSnapshotStatus() != SNAPSHOT_ACTIVE || + currSnapInfo.getDeepCleanedDeletedDir()) { + continue; + } + + ReferenceCounted + rcPrevOmSnapshot = null; + ReferenceCounted + rcPrevToPrevOmSnapshot = null; + try { + long volumeId = metadataManager + .getVolumeId(currSnapInfo.getVolumeName()); + // Get bucketInfo for the snapshot bucket to get bucket layout. + String dbBucketKey = metadataManager + .getBucketKey(currSnapInfo.getVolumeName(), + currSnapInfo.getBucketName()); + OmBucketInfo bucketInfo = metadataManager + .getBucketTable().get(dbBucketKey); + + if (bucketInfo == null) { + throw new IllegalStateException("Bucket " + "/" + + currSnapInfo.getVolumeName() + "/" + currSnapInfo + .getBucketName() + + " is not found. BucketInfo should not be " + + "null for snapshotted bucket. 
The OM is in " + + "unexpected state."); + } + + SnapshotInfo previousSnapshot = getPreviousActiveSnapshot( + currSnapInfo, snapChainManager, omSnapshotManager); + SnapshotInfo previousToPrevSnapshot = null; + + Table previousKeyTable = null; + Table prevRenamedTable = null; + + if (previousSnapshot != null) { + rcPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + previousSnapshot.getVolumeName(), + previousSnapshot.getBucketName(), + getSnapshotPrefix(previousSnapshot.getName()), false); + OmSnapshot omPreviousSnapshot = (OmSnapshot) + rcPrevOmSnapshot.get(); + + previousKeyTable = omPreviousSnapshot.getMetadataManager() + .getKeyTable(bucketInfo.getBucketLayout()); + prevRenamedTable = omPreviousSnapshot + .getMetadataManager().getSnapshotRenamedTable(); + previousToPrevSnapshot = getPreviousActiveSnapshot( + previousSnapshot, snapChainManager, omSnapshotManager); + } + + Table previousToPrevKeyTable = null; + if (previousToPrevSnapshot != null) { + rcPrevToPrevOmSnapshot = omSnapshotManager.checkForSnapshot( + previousToPrevSnapshot.getVolumeName(), + previousToPrevSnapshot.getBucketName(), + getSnapshotPrefix(previousToPrevSnapshot.getName()), false); + OmSnapshot omPreviousToPrevSnapshot = (OmSnapshot) + rcPrevToPrevOmSnapshot.get(); + + previousToPrevKeyTable = omPreviousToPrevSnapshot + .getMetadataManager() + .getKeyTable(bucketInfo.getBucketLayout()); + } + + String dbBucketKeyForDir = getOzonePathKeyForFso(metadataManager, + currSnapInfo.getVolumeName(), currSnapInfo.getBucketName()); + try (ReferenceCounted + rcCurrOmSnapshot = omSnapshotManager.checkForSnapshot( + currSnapInfo.getVolumeName(), + currSnapInfo.getBucketName(), + getSnapshotPrefix(currSnapInfo.getName()), + false)) { + + OmSnapshot currOmSnapshot = (OmSnapshot) rcCurrOmSnapshot.get(); + Table snapDeletedDirTable = + currOmSnapshot.getMetadataManager().getDeletedDirTable(); + + try (TableIterator> deletedDirIterator = snapDeletedDirTable + .iterator(dbBucketKeyForDir)) { + + while (deletedDirIterator.hasNext()) { + Table.KeyValue deletedDirInfo = + deletedDirIterator.next(); + + // For each deleted directory we do an in-memory DFS and + // do a deep clean and exclusive size calculation. + iterateDirectoryTree(deletedDirInfo, volumeId, bucketInfo, + previousSnapshot, previousToPrevSnapshot, + currOmSnapshot, previousKeyTable, prevRenamedTable, + previousToPrevKeyTable, dbBucketKeyForDir); + } + updateDeepCleanSnapshotDir(currSnapInfo.getTableKey()); + if (previousSnapshot != null) { + updateExclusiveSize(previousSnapshot.getTableKey()); + } + } + } + } finally { + IOUtils.closeQuietly(rcPrevOmSnapshot, rcPrevToPrevOmSnapshot); + } + } + } catch (IOException ex) { + LOG.error("Error while running directory deep clean on snapshots." 
+ + " Will retry at next run.", ex); + } + return BackgroundTaskResult.EmptyTaskResult.newResult(); + } + } + + @SuppressWarnings("checkstyle:ParameterNumber") + private void iterateDirectoryTree( + Table.KeyValue deletedDirInfo, long volumeId, + OmBucketInfo bucketInfo, + SnapshotInfo previousSnapshot, + SnapshotInfo previousToPrevSnapshot, + OmSnapshot currOmSnapshot, + Table previousKeyTable, + Table prevRenamedTable, + Table previousToPrevKeyTable, + String dbBucketKeyForDir) throws IOException { + + Table snapDirTable = + currOmSnapshot.getMetadataManager().getDirectoryTable(); + Table snapRenamedTable = + currOmSnapshot.getMetadataManager().getSnapshotRenamedTable(); + + Stack stackNodes = new Stack<>(); + OmDirectoryInfo omDeletedDirectoryInfo = + getDirectoryInfo(deletedDirInfo.getValue()); + String dirPathDbKey = currOmSnapshot.getMetadataManager() + .getOzonePathKey(volumeId, bucketInfo.getObjectID(), + omDeletedDirectoryInfo); + // Stack Init + StackNode topLevelDir = new StackNode(); + topLevelDir.setDirKey(dirPathDbKey); + topLevelDir.setDirValue(omDeletedDirectoryInfo); + stackNodes.push(topLevelDir); + + try (TableIterator> + directoryIterator = snapDirTable.iterator(dbBucketKeyForDir)) { + + while (!stackNodes.isEmpty()) { + StackNode stackTop = stackNodes.peek(); + // First process all the files in the current directory + // and then do a DFS for directory. + if (StringUtils.isEmpty(stackTop.getSubDirSeek())) { + processFilesUnderDir(previousSnapshot, + previousToPrevSnapshot, + volumeId, + bucketInfo, + stackTop.getDirValue(), + currOmSnapshot.getMetadataManager(), + snapRenamedTable, + previousKeyTable, + prevRenamedTable, + previousToPrevKeyTable); + // Format : /volId/bucketId/parentId/ + String seekDirInDB = currOmSnapshot.getMetadataManager() + .getOzonePathKey(volumeId, bucketInfo.getObjectID(), + stackTop.getDirValue().getObjectID(), ""); + stackTop.setSubDirSeek(seekDirInDB); + } else { + // Adding \0 to seek the next greater element. + directoryIterator.seek(stackTop.getSubDirSeek() + "\0"); + if (directoryIterator.hasNext()) { + + Table.KeyValue deletedSubDirInfo = directoryIterator.next(); + String deletedSubDirKey = deletedSubDirInfo.getKey(); + String prefixCheck = currOmSnapshot.getMetadataManager() + .getOzoneDeletePathDirKey(stackTop.getSubDirSeek()); + // Exit if it is out of the sub dir prefix scope. 
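            // Illustration of the key ordering this walk relies on (IDs made up,
            // key format is /volumeId/bucketId/parentObjectId/name as noted above):
            //   /101/102/103/dirA    (child of the directory on top of the stack)
            //   /101/102/103/dirB    (child, shares the /101/102/103/ prefix)
            //   /101/102/104/other   (first key past the prefix, pop the stack)
            // Seeking to getSubDirSeek() + "\0" positions the iterator just after
            // the entry that was already pushed, so each subdirectory is visited
            // only once.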
+ if (!deletedSubDirKey.startsWith(prefixCheck)) { + stackNodes.pop(); + } else { + stackTop.setSubDirSeek(deletedSubDirKey); + StackNode nextSubDir = new StackNode(); + nextSubDir.setDirKey(deletedSubDirInfo.getKey()); + nextSubDir.setDirValue(deletedSubDirInfo.getValue()); + stackNodes.push(nextSubDir); + } + } else { + stackNodes.pop(); + } + } + } + } + } + + private void updateExclusiveSize(String prevSnapshotKeyTable) { + ClientId clientId = ClientId.randomId(); + SnapshotSize snapshotSize = SnapshotSize.newBuilder() + .setExclusiveSize( + exclusiveSizeMap.getOrDefault(prevSnapshotKeyTable, 0L)) + .setExclusiveReplicatedSize( + exclusiveReplicatedSizeMap.getOrDefault( + prevSnapshotKeyTable, 0L)) + .build(); + exclusiveSizeMap.remove(prevSnapshotKeyTable); + exclusiveReplicatedSizeMap.remove(prevSnapshotKeyTable); + SetSnapshotPropertyRequest + setSnapshotPropertyRequest = + SetSnapshotPropertyRequest.newBuilder() + .setSnapshotKey(prevSnapshotKeyTable) + .setSnapshotSize(snapshotSize) + .build(); + + OMRequest omRequest = OMRequest.newBuilder() + .setCmdType(Type.SetSnapshotProperty) + .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) + .setClientId(clientId.toString()) + .build(); + + submitRequest(omRequest, clientId); + } + + @SuppressWarnings("checkstyle:ParameterNumber") + private void processFilesUnderDir( + SnapshotInfo previousSnapshot, + SnapshotInfo previousToPrevSnapshot, + long volumeId, + OmBucketInfo bucketInfo, + OmDirectoryInfo parentInfo, + OMMetadataManager metadataManager, + Table snapRenamedTable, + Table previousKeyTable, + Table prevRenamedTable, + Table previousToPrevKeyTable) + throws IOException { + String seekFileInDB = metadataManager.getOzonePathKey(volumeId, + bucketInfo.getObjectID(), + parentInfo.getObjectID(), ""); + List blocksForKeyDelete = new ArrayList<>(); + + Table fileTable = metadataManager.getFileTable(); + try (TableIterator> + iterator = fileTable.iterator(seekFileInDB)) { + + while (iterator.hasNext()) { + Table.KeyValue entry = iterator.next(); + OmKeyInfo fileInfo = entry.getValue(); + if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(), + parentInfo.getObjectID())) { + break; + } + + String ozoneDeletePathKey = metadataManager + .getOzoneDeletePathKey(fileInfo.getObjectID(), entry.getKey()); + if (isKeyReclaimable(previousKeyTable, snapRenamedTable, + fileInfo, bucketInfo, volumeId, null)) { + for (OmKeyLocationInfoGroup keyLocations : + fileInfo.getKeyLocationVersions()) { + List item = keyLocations.getLocationList().stream() + .map(b -> new BlockID(b.getContainerID(), b.getLocalID())) + .collect(Collectors.toList()); + BlockGroup keyBlocks = BlockGroup.newBuilder() + .setKeyName(ozoneDeletePathKey) + .addAllBlockIDs(item) + .build(); + blocksForKeyDelete.add(keyBlocks); + } + // TODO: Add Retry mechanism. 
+ getScmClient().deleteKeyBlocks(blocksForKeyDelete); + } else if (previousSnapshot != null) { + calculateExclusiveSize(previousSnapshot, previousToPrevSnapshot, + fileInfo, bucketInfo, volumeId, snapRenamedTable, + previousKeyTable, prevRenamedTable, previousToPrevKeyTable, + exclusiveSizeMap, exclusiveReplicatedSizeMap); + } + } + } + } + + private void updateDeepCleanSnapshotDir(String snapshotKeyTable) { + ClientId clientId = ClientId.randomId(); + SetSnapshotPropertyRequest setSnapshotPropertyRequest = + SetSnapshotPropertyRequest.newBuilder() + .setSnapshotKey(snapshotKeyTable) + .setDeepCleanedDeletedDir(true) + .build(); + + OMRequest omRequest = OMRequest.newBuilder() + .setCmdType(Type.SetSnapshotProperty) + .setSetSnapshotPropertyRequest(setSnapshotPropertyRequest) + .setClientId(clientId.toString()) + .build(); + + submitRequest(omRequest, clientId); + } + + public void submitRequest(OMRequest omRequest, ClientId clientId) { + try { + if (isRatisEnabled()) { + OzoneManagerRatisServer server = + getOzoneManager().getOmRatisServer(); + + RaftClientRequest raftClientRequest = RaftClientRequest.newBuilder() + .setClientId(clientId) + .setServerId(server.getRaftPeerId()) + .setGroupId(server.getRaftGroupId()) + .setCallId(getRunCount().get()) + .setMessage(Message.valueOf( + OMRatisHelper.convertRequestToByteString(omRequest))) + .setType(RaftClientRequest.writeRequestType()) + .build(); + + server.submitRequest(omRequest, raftClientRequest); + } else { + getOzoneManager().getOmServerProtocol() + .submitRequest(null, omRequest); + } + } catch (ServiceException e) { + LOG.error("Snapshot deep cleaning request failed. " + + "Will retry at next run.", e); + } + } + + /** + * Stack node data for directory deep clean for snapshot. + */ + private static class StackNode { + private String dirKey; + private OmDirectoryInfo dirValue; + private String subDirSeek; + + public String getDirKey() { + return dirKey; + } + + public void setDirKey(String dirKey) { + this.dirKey = dirKey; + } + + public OmDirectoryInfo getDirValue() { + return dirValue; + } + + public void setDirValue(OmDirectoryInfo dirValue) { + this.dirValue = dirValue; + } + + public String getSubDirSeek() { + return subDirSeek; + } + + public void setSubDirSeek(String subDirSeek) { + this.subDirSeek = subDirSeek; + } + + @Override + public String toString() { + return "StackNode{" + + "dirKey='" + dirKey + '\'' + + ", dirObjectId=" + dirValue.getObjectID() + + ", subDirSeek='" + subDirSeek + '\'' + + '}'; + } + } +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index 7b0e5678e46b..b1dcb2ad81d5 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -36,7 +36,7 @@ import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotProperty; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotSize; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetSnapshotPropertyRequest; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.BeforeEach; @@ -151,14 +151,14 @@ private List createSnapshotUpdateSizeRequest() iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { while (iterator.hasNext()) { String snapDbKey = iterator.next().getKey(); - SnapshotProperty snapshotSize = SnapshotProperty.newBuilder() - .setSnapshotKey(snapDbKey) + SnapshotSize snapshotSize = SnapshotSize.newBuilder() .setExclusiveSize(exclusiveSize) .setExclusiveReplicatedSize(exclusiveSizeAfterRepl) .build(); SetSnapshotPropertyRequest snapshotUpdateSizeRequest = SetSnapshotPropertyRequest.newBuilder() - .setSnapshotProperty(snapshotSize) + .setSnapshotKey(snapDbKey) + .setSnapshotSize(snapshotSize) .build(); OMRequest omRequest = OMRequest.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java index 8ca01b2d6433..1131f81c5773 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestKeyDeletingService.java @@ -118,7 +118,7 @@ private OzoneConfiguration createConfAndInitValues() throws IOException { } System.setProperty(DBConfigFromFile.CONFIG_DIR, "/"); ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString()); - conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); @@ -204,7 +204,7 @@ public void checkIfDeleteServiceWithFailingSCM() // Make sure that we have run the background thread 5 times more GenericTestUtils.waitFor( () -> keyDeletingService.getRunCount().get() >= 5, - 100, 1000); + 100, 10000); // Since SCM calls are failing, deletedKeyCount should be zero. Assertions.assertEquals(0, keyDeletingService.getDeletedKeyCount().get()); Assertions.assertEquals(keyCount, keyManager @@ -542,7 +542,7 @@ public void testSnapshotDeepClean() throws Exception { // Create Snap3, traps all the deleted keys. writeClient.createSnapshot(volumeName, bucketName, "snap3"); assertTableRowCount(snapshotInfoTable, 3, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, true); + checkSnapDeepCleanStatus(snapshotInfoTable, false); keyDeletingService.resume(); @@ -562,9 +562,8 @@ volumeName, bucketName, getSnapshotPrefix("snap3"), true)) { assertTableRowCount(snap3deletedTable, 0, metadataManager); assertTableRowCount(deletedTable, 0, metadataManager); - checkSnapDeepCleanStatus(snapshotInfoTable, false); + checkSnapDeepCleanStatus(snapshotInfoTable, true); } - } @Test From 190dcde673531085f091df1d5f0131ba633ac683 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 4 Apr 2024 04:22:22 +0800 Subject: [PATCH 06/17] HDDS-10590. 
[Snapshot] Synchronized snapshot purge, set snapshot property and SstFilteringService (#6456) (cherry picked from commit 7da5ecb5855b773fd262a839f8791f50ebca6fca) --- .../hadoop/ozone/om/helpers/SnapshotInfo.java | 2 +- .../snapshot/OMSnapshotPurgeRequest.java | 219 +++++++++++------- .../OMSnapshotSetPropertyRequest.java | 38 ++- .../snapshot/OMSnapshotPurgeResponse.java | 2 +- 4 files changed, 174 insertions(+), 87 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java index 49e85a4c7201..70824161a6b5 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/SnapshotInfo.java @@ -761,7 +761,7 @@ public SnapshotInfo copyObject() { public String toString() { return "SnapshotInfo{" + "snapshotId: '" + snapshotId + '\'' + - ", name: '" + name + "'," + + ", name: '" + name + '\'' + ", volumeName: '" + volumeName + '\'' + ", bucketName: '" + bucketName + '\'' + ", snapshotStatus: '" + snapshotStatus + '\'' + diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 5abe08e55758..120083869ee6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -19,6 +19,10 @@ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.commons.lang3.tuple.Triple; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -39,11 +43,15 @@ import java.io.IOException; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.NoSuchElementException; +import java.util.Set; import java.util.UUID; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; + /** * Handles OMSnapshotPurge Request. * This is an OM internal request. Does not need @RequireSnapshotFeatureState. @@ -79,34 +87,63 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Map updatedPathPreviousAndGlobalSnapshots = new HashMap<>(); - // Snapshots that are purged by the SnapshotDeletingService - // will update the next snapshot so that is can be deep cleaned - // by the KeyDeletingService in the next run. + // Each snapshot purge operation does three things: + // 1. Update the snapshot chain, + // 2. Update the deep clean flag for the next active snapshot (So that it can be + // deep cleaned by the KeyDeletingService in the next run), + // 3. Finally, purge the snapshot. + // All of these steps have to be performed only when it acquires all the necessary + // locks (lock on the snapshot to be purged, lock on the next active snapshot, and + // lock on the next path and global previous snapshots). 
Ideally, there is no need + // for locks for snapshot purge and can rely on OMStateMachine because OMStateMachine + // is going to process each request sequentially. + // + // But there is a problem with that. After filtering unnecessary SST files for a snapshot, + // SstFilteringService updates that snapshot's SstFilter flag. SstFilteringService cannot + // use SetSnapshotProperty API because it runs on each OM independently and One OM does + // not know if the snapshot has been filtered on the other OM in HA environment. + // + // If locks are not taken snapshot purge and SstFilteringService will cause a race condition + // and override one's update with another. for (String snapTableKey : snapshotDbKeys) { - SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable() - .get(snapTableKey); - - if (fromSnapshot == null) { - // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. - LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + - "Snapshot purge request.", snapTableKey); - continue; - } + // To acquire all the locks, a set is maintained which is keyed by snapshotTableKey. + // snapshotTableKey is nothing but /volumeName/bucketName/snapshotName. + // Once all the locks are acquired, it performs the three steps mentioned above and + // release all the locks after that. + Set> lockSet = new HashSet<>(4, 1); + try { + if (omMetadataManager.getSnapshotInfoTable().get(snapTableKey) == null) { + // Snapshot may have been purged in the previous iteration of SnapshotDeletingService. + LOG.warn("The snapshot {} is not longer in snapshot table, It maybe removed in the previous " + + "Snapshot purge request.", snapTableKey); + continue; + } + + acquireLock(lockSet, snapTableKey, omMetadataManager); + SnapshotInfo fromSnapshot = omMetadataManager.getSnapshotInfoTable().get(snapTableKey); + + SnapshotInfo nextSnapshot = + SnapshotUtils.getNextActiveSnapshot(fromSnapshot, snapshotChainManager, omSnapshotManager); - SnapshotInfo nextSnapshot = SnapshotUtils - .getNextActiveSnapshot(fromSnapshot, - snapshotChainManager, omSnapshotManager); - - updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, - trxnLogIndex, updatedSnapInfos); - updateSnapshotChainAndCache(omMetadataManager, fromSnapshot, - trxnLogIndex, updatedPathPreviousAndGlobalSnapshots); - // Remove and close snapshot's RocksDB instance from SnapshotCache. - ozoneManager.getOmSnapshotManager().getSnapshotCache() - .invalidate(snapTableKey); - // Update SnapshotInfoTable cache. - ozoneManager.getMetadataManager().getSnapshotInfoTable() - .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + if (nextSnapshot != null) { + acquireLock(lockSet, nextSnapshot.getTableKey(), omMetadataManager); + } + + // Update the chain first so that it has all the necessary locks before updating deep clean. + updateSnapshotChainAndCache(lockSet, omMetadataManager, fromSnapshot, trxnLogIndex, + updatedPathPreviousAndGlobalSnapshots); + updateSnapshotInfoAndCache(nextSnapshot, omMetadataManager, trxnLogIndex, updatedSnapInfos); + // Remove and close snapshot's RocksDB instance from SnapshotCache. + omSnapshotManager.getSnapshotCache().invalidate(snapTableKey); + // Update SnapshotInfoTable cache. 
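        // The cache entry added below carries only the transaction index and
        // no value, which appears to mark the purged snapshot as removed in
        // the SnapshotInfoTable cache.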
+ omMetadataManager.getSnapshotInfoTable() + .addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), CacheValue.get(trxnLogIndex)); + } finally { + for (Triple lockKey: lockSet) { + omMetadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight()); + } + } } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), @@ -120,20 +157,41 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } + private void acquireLock(Set> lockSet, String snapshotTableKey, + OMMetadataManager omMetadataManager) throws IOException { + SnapshotInfo snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); + + // It should not be the case that lock is required for non-existing snapshot. + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' doesn't not exist in snapshot table.", snapshotTableKey); + throw new OMException("Snapshot: '{" + snapshotTableKey + "}' doesn't not exist in snapshot table.", + OMException.ResultCodes.FILE_NOT_FOUND); + } + Triple lockKey = Triple.of(snapshotInfo.getVolumeName(), snapshotInfo.getBucketName(), + snapshotInfo.getName()); + if (!lockSet.contains(lockKey)) { + mergeOmLockDetails(omMetadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, lockKey.getLeft(), lockKey.getMiddle(), lockKey.getRight())); + lockSet.add(lockKey); + } + } + private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, OmMetadataManagerImpl omMetadataManager, long trxnLogIndex, - Map updatedSnapInfos) { + Map updatedSnapInfos) throws IOException { if (snapInfo != null) { + // Fetch the latest value again after acquiring lock. + SnapshotInfo updatedSnapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapInfo.getTableKey()); + // Setting next snapshot deep clean to false, Since the // current snapshot is deleted. We can potentially // reclaim more keys in the next snapshot. - snapInfo.setDeepClean(false); + updatedSnapshotInfo.setDeepClean(false); // Update table cache first - omMetadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(snapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, snapInfo)); - updatedSnapInfos.put(snapInfo.getTableKey(), snapInfo); + omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(updatedSnapshotInfo.getTableKey()), + CacheValue.get(trxnLogIndex, updatedSnapshotInfo)); + updatedSnapInfos.put(updatedSnapshotInfo.getTableKey(), updatedSnapshotInfo); } } @@ -144,6 +202,7 @@ private void updateSnapshotInfoAndCache(SnapshotInfo snapInfo, * update in DB. 
*/ private void updateSnapshotChainAndCache( + Set> lockSet, OmMetadataManagerImpl metadataManager, SnapshotInfo snapInfo, long trxnLogIndex, @@ -155,7 +214,6 @@ private void updateSnapshotChainAndCache( SnapshotChainManager snapshotChainManager = metadataManager .getSnapshotChainManager(); - SnapshotInfo nextPathSnapInfo = null; // If the snapshot is deleted in the previous run, then the in-memory // SnapshotChainManager might throw NoSuchElementException as the snapshot @@ -171,58 +229,63 @@ private void updateSnapshotChainAndCache( return; } - // Updates next path snapshot's previous snapshot ID + String nextPathSnapshotKey = null; + if (hasNextPathSnapshot) { UUID nextPathSnapshotId = snapshotChainManager.nextPathSnapshot( snapInfo.getSnapshotPath(), snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager + nextPathSnapshotKey = snapshotChainManager .getTableKey(nextPathSnapshotId); - nextPathSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - if (nextPathSnapInfo != null) { - nextPathSnapInfo.setPathPreviousSnapshotId( - snapInfo.getPathPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } + + // Acquire lock from the snapshot + acquireLock(lockSet, nextPathSnapshotKey, metadataManager); } - // Updates next global snapshot's previous snapshot ID + String nextGlobalSnapshotKey = null; if (hasNextGlobalSnapshot) { - UUID nextGlobalSnapshotId = - snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); - - String snapshotTableKey = snapshotChainManager - .getTableKey(nextGlobalSnapshotId); - - SnapshotInfo nextGlobalSnapInfo = metadataManager.getSnapshotInfoTable() - .get(snapshotTableKey); - // If both next global and path snapshot are same, it may overwrite - // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check - // will prevent it. - if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && - nextGlobalSnapInfo.getSnapshotId().equals( - nextPathSnapInfo.getSnapshotId())) { - nextPathSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextPathSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextPathSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); - } else if (nextGlobalSnapInfo != null) { - nextGlobalSnapInfo.setGlobalPreviousSnapshotId( - snapInfo.getGlobalPreviousSnapshotId()); - metadataManager.getSnapshotInfoTable().addCacheEntry( - new CacheKey<>(nextGlobalSnapInfo.getTableKey()), - CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); - updatedPathPreviousAndGlobalSnapshots - .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); - } + UUID nextGlobalSnapshotId = snapshotChainManager.nextGlobalSnapshot(snapInfo.getSnapshotId()); + nextGlobalSnapshotKey = snapshotChainManager.getTableKey(nextGlobalSnapshotId); + + // Acquire lock from the snapshot + acquireLock(lockSet, nextGlobalSnapshotKey, metadataManager); + } + + SnapshotInfo nextPathSnapInfo = + nextPathSnapshotKey != null ? metadataManager.getSnapshotInfoTable().get(nextPathSnapshotKey) : null; + + SnapshotInfo nextGlobalSnapInfo = + nextGlobalSnapshotKey != null ? 
metadataManager.getSnapshotInfoTable().get(nextGlobalSnapshotKey) : null; + + // Updates next path snapshot's previous snapshot ID + if (nextPathSnapInfo != null) { + nextPathSnapInfo.setPathPreviousSnapshotId(snapInfo.getPathPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } + + // Updates next global snapshot's previous snapshot ID + // If both next global and path snapshot are same, it may overwrite + // nextPathSnapInfo.setPathPreviousSnapshotID(), adding this check + // will prevent it. + if (nextGlobalSnapInfo != null && nextPathSnapInfo != null && + nextGlobalSnapInfo.getSnapshotId().equals(nextPathSnapInfo.getSnapshotId())) { + nextPathSnapInfo.setGlobalPreviousSnapshotId(snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextPathSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextPathSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextPathSnapInfo.getTableKey(), nextPathSnapInfo); + } else if (nextGlobalSnapInfo != null) { + nextGlobalSnapInfo.setGlobalPreviousSnapshotId( + snapInfo.getGlobalPreviousSnapshotId()); + metadataManager.getSnapshotInfoTable().addCacheEntry( + new CacheKey<>(nextGlobalSnapInfo.getTableKey()), + CacheValue.get(trxnLogIndex, nextGlobalSnapInfo)); + updatedPathPreviousAndGlobalSnapshots + .put(nextGlobalSnapInfo.getTableKey(), nextGlobalSnapInfo); } snapshotChainManager.deleteSnapshot(snapInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index 35f91a13f7bb..19778973a6f8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -35,7 +35,8 @@ import java.io.IOException; -import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.INVALID_SNAPSHOT_ERROR; +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FILE_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.SNAPSHOT_LOCK; /** * Updates the exclusive size of the snapshot. 
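For context, the background services touched earlier in this series build the request this handler consumes roughly as follows; the snapshot key and sizes are placeholder values, and the builder calls mirror those used in KeyDeletingService and SnapshotDirectoryCleaningService:

    SnapshotSize snapshotSize = SnapshotSize.newBuilder()
        .setExclusiveSize(1024L)                 // bytes exclusive to the snapshot
        .setExclusiveReplicatedSize(3 * 1024L)   // same, counting replication
        .build();
    SetSnapshotPropertyRequest request = SetSnapshotPropertyRequest.newBuilder()
        .setSnapshotKey("/vol1/bucket1/snap1")   // snapshotInfoTable key
        .setSnapshotSize(snapshotSize)           // or setDeepCleanedDeletedKey(true)
        .build();                                // or setDeepCleanedDeletedDir(true)
    OMRequest omRequest = OMRequest.newBuilder()
        .setCmdType(Type.SetSnapshotProperty)
        .setSetSnapshotPropertyRequest(request)
        .setClientId(ClientId.randomId().toString())
        .build();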
@@ -62,16 +63,31 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .getSetSnapshotPropertyRequest(); SnapshotInfo updatedSnapInfo = null; + String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + boolean acquiredSnapshotLock = false; + String volumeName = null; + String bucketName = null; + String snapshotName = null; + try { - String snapshotKey = setSnapshotPropertyRequest.getSnapshotKey(); + SnapshotInfo snapshotInfo = metadataManager.getSnapshotInfoTable().get(snapshotKey); + if (snapshotInfo == null) { + LOG.error("Snapshot: '{}' doesn't not exist in snapshot table.", snapshotKey); + throw new OMException("Snapshot: '{" + snapshotKey + "}' doesn't not exist in snapshot table.", FILE_NOT_FOUND); + } + + volumeName = snapshotInfo.getVolumeName(); + bucketName = snapshotInfo.getBucketName(); + snapshotName = snapshotInfo.getName(); + + mergeOmLockDetails(metadataManager.getLock() + .acquireWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + + acquiredSnapshotLock = getOmLockDetails().isLockAcquired(); + updatedSnapInfo = metadataManager.getSnapshotInfoTable() .get(snapshotKey); - if (updatedSnapInfo == null) { - LOG.error("SnapshotInfo for Snapshot: {} is not found", snapshotKey); - throw new OMException("SnapshotInfo for Snapshot: " + snapshotKey + - " is not found", INVALID_SNAPSHOT_ERROR); - } if (setSnapshotPropertyRequest.hasDeepCleanedDeletedDir()) { updatedSnapInfo.setDeepCleanedDeletedDir(setSnapshotPropertyRequest @@ -104,6 +120,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } catch (IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); + } finally { + if (acquiredSnapshotLock) { + mergeOmLockDetails(metadataManager.getLock() + .releaseWriteLock(SNAPSHOT_LOCK, volumeName, bucketName, snapshotName)); + } + if (omClientResponse != null) { + omClientResponse.setOmLockDetails(getOmLockDetails()); + } } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index e77543b1548f..1dc27cc5f6b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -80,9 +80,9 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, OmMetadataManagerImpl metadataManager = (OmMetadataManagerImpl) omMetadataManager; - updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); updateSnapInfo(metadataManager, batchOperation, updatedPreviousAndGlobalSnapInfos); + updateSnapInfo(metadataManager, batchOperation, updatedSnapInfos); for (String dbKey: snapshotDbKeys) { // Skip the cache here because snapshot is purged from cache in OMSnapshotPurgeRequest. SnapshotInfo snapshotInfo = omMetadataManager From 7c78759c77a14cb2e9974cf732d082a5e0f798d5 Mon Sep 17 00:00:00 2001 From: Hemant Kumar Date: Thu, 4 Apr 2024 07:13:19 +0800 Subject: [PATCH 07/17] HDDS-9200. 
[Snapshot] Added logs and metrics for snapshot purge and set property APIs (#6453) (cherry picked from commit 3467db1b1cc581a21caeb8648587fcbf35bbfdfa) --- .../org/apache/hadoop/ozone/om/OMMetrics.java | 36 ++++++++++++++ .../snapshot/OMSnapshotPurgeRequest.java | 11 ++++- .../OMSnapshotSetPropertyRequest.java | 6 +++ ...TestOMSnapshotPurgeRequestAndResponse.java | 34 +++++++++++++ ...SnapshotSetPropertyRequestAndResponse.java | 48 ++++++++++++++++++- 5 files changed, 133 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java index ed5efbefe50c..faf0efd46433 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java @@ -74,6 +74,8 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotLists; private @Metric MutableCounterLong numSnapshotDiffJobs; private @Metric MutableCounterLong numSnapshotInfos; + private @Metric MutableCounterLong numSnapshotPurges; + private @Metric MutableCounterLong numSnapshotSetProperties; private @Metric MutableCounterLong numGetFileStatus; private @Metric MutableCounterLong numCreateDirectory; @@ -136,6 +138,8 @@ public class OMMetrics implements OmMetadataReaderMetrics { private @Metric MutableCounterLong numSnapshotListFails; private @Metric MutableCounterLong numSnapshotDiffJobFails; private @Metric MutableCounterLong numSnapshotInfoFails; + private @Metric MutableCounterLong numSnapshotPurgeFails; + private @Metric MutableCounterLong numSnapshotSetPropertyFails; private @Metric MutableCounterLong numSnapshotActive; private @Metric MutableCounterLong numSnapshotDeleted; @@ -479,6 +483,14 @@ public void incNumSnapshotInfos() { numSnapshotInfos.incr(); } + public void incNumSnapshotPurges() { + numSnapshotPurges.incr(); + } + + public void incNumSnapshotSetProperties() { + numSnapshotSetProperties.incr(); + } + public void incNumSnapshotDiffJobs() { numSnapshotDiffJobs.incr(); } @@ -494,6 +506,15 @@ public void incNumSnapshotDiffJobFails() { public void incNumSnapshotInfoFails() { numSnapshotInfoFails.incr(); } + + public void incNumSnapshotPurgeFails() { + numSnapshotPurgeFails.incr(); + } + + public void incNumSnapshotSetPropertyFails() { + numSnapshotSetPropertyFails.incr(); + } + public void setNumSnapshotActive(long num) { long currVal = numSnapshotActive.value(); numSnapshotActive.incr(num - currVal); @@ -1290,6 +1311,14 @@ public long getNumSnapshotDiffJobs() { return numSnapshotDiffJobs.value(); } + public long getNumSnapshotPurges() { + return numSnapshotPurges.value(); + } + + public long getNumSnapshotSetProperties() { + return numSnapshotSetProperties.value(); + } + public long getNumSnapshotCreateFails() { return numSnapshotCreateFails.value(); } @@ -1314,6 +1343,13 @@ public long getNumSnapshotDeleted() { return numSnapshotDeleted.value(); } + public long getNumSnapshotPurgeFails() { + return numSnapshotPurgeFails.value(); + } + + public long getNumSnapshotSetPropertyFails() { + return numSnapshotSetPropertyFails.value(); + } public void incNumTrashRenames() { numTrashRenames.incr(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 120083869ee6..b68e7f6079b2 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -21,8 +21,8 @@ import org.apache.commons.lang3.tuple.Triple; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.exceptions.OMException; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -67,6 +67,8 @@ public OMSnapshotPurgeRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); + OmSnapshotManager omSnapshotManager = ozoneManager.getOmSnapshotManager(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); @@ -149,9 +151,16 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapInfos, updatedPathPreviousAndGlobalSnapshots); + + omMetrics.incNumSnapshotPurges(); + LOG.info("Successfully executed snapshotPurgeRequest: {{}} along with updating deep clean flags for " + + "snapshots: {} and global and previous for snapshots:{}.", + snapshotPurgeRequest, updatedSnapInfos.keySet(), updatedPathPreviousAndGlobalSnapshots.keySet()); } catch (IOException ex) { omClientResponse = new OMSnapshotPurgeResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotPurgeFails(); + LOG.error("Failed to execute snapshotPurgeRequest:{{}}.", snapshotPurgeRequest, ex); } return omClientResponse; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index 19778973a6f8..3d4873e67355 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.snapshot; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -52,6 +53,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { @Override public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long trxnLogIndex) { + OMMetrics omMetrics = ozoneManager.getMetrics(); OMClientResponse omClientResponse = null; OMMetadataManager metadataManager = ozoneManager.getMetadataManager(); @@ -117,9 +119,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, CacheValue.get(trxnLogIndex, updatedSnapInfo)); omClientResponse = new OMSnapshotSetPropertyResponse( omResponse.build(), updatedSnapInfo); + omMetrics.incNumSnapshotSetProperties(); + LOG.info("Successfully executed 
snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest); } catch (IOException ex) { omClientResponse = new OMSnapshotSetPropertyResponse( createErrorOMResponse(omResponse, ex)); + omMetrics.incNumSnapshotSetPropertyFails(); + LOG.error("Failed to execute snapshotSetPropertyRequest: {{}}.", setSnapshotPropertyRequest, ex); } finally { if (acquiredSnapshotLock) { mergeOmLockDetails(metadataManager.getLock() diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 54dd96e6c883..b666caf21d95 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.hdds.utils.db.RDBStore; +import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.IOmMetadataReader; @@ -65,6 +66,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -235,6 +237,8 @@ private void purgeSnapshots(OMRequest snapshotPurgeRequest) @Test public void testValidateAndUpdateCache() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); List snapshotDbKeysToPurge = createSnapshots(10); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); @@ -262,6 +266,36 @@ public void testValidateAndUpdateCache() throws Exception { for (Path checkpoint : checkpointPaths) { assertFalse(Files.exists(checkpoint)); } + assertEquals(initialSnapshotPurgeCount + 1, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount, omMetrics.getNumSnapshotPurgeFails()); + } + + /** + * This test is mainly to validate metrics and error code. 
+ */ + @Test + public void testValidateAndUpdateCacheFailure() throws Exception { + long initialSnapshotPurgeCount = omMetrics.getNumSnapshotPurges(); + long initialSnapshotPurgeFailCount = omMetrics.getNumSnapshotPurgeFails(); + + List snapshotDbKeysToPurge = createSnapshots(10); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + OMRequest snapshotPurgeRequest = createPurgeKeysRequest(snapshotDbKeysToPurge); + OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest); + + OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse) + omSnapshotPurgeRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotPurgeResponse.getOMResponse().getStatus()); + assertEquals(initialSnapshotPurgeCount, omMetrics.getNumSnapshotPurges()); + assertEquals(initialSnapshotPurgeFailCount + 1, omMetrics.getNumSnapshotPurgeFails()); } // TODO: clean up: Do we this test after diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java index b1dcb2ad81d5..8c732efec575 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotSetPropertyRequestAndResponse.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; @@ -49,6 +50,7 @@ import java.util.List; import java.util.UUID; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.INTERNAL_ERROR; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.mockito.ArgumentMatchers.anyString; @@ -63,7 +65,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { private BatchOperation batchOperation; private OzoneManager ozoneManager; private OMMetadataManager omMetadataManager; - + private OMMetrics omMetrics; private String volumeName; private String bucketName; private String snapName; @@ -72,6 +74,7 @@ public class TestOMSnapshotSetPropertyRequestAndResponse { @BeforeEach void setup(@TempDir File testDir) throws Exception { + omMetrics = OMMetrics.create(); ozoneManager = Mockito.mock(OzoneManager.class); OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); when(lvm.isAllowed(anyString())).thenReturn(true); @@ -85,6 +88,7 @@ void setup(@TempDir File testDir) throws Exception { omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration, ozoneManager); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); + when(ozoneManager.getMetrics()).thenReturn(omMetrics); 
volumeName = UUID.randomUUID().toString(); bucketName = UUID.randomUUID().toString(); @@ -95,6 +99,9 @@ void setup(@TempDir File testDir) throws Exception { @Test public void testValidateAndUpdateCache() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + createSnapshotDataForTest(); assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); List snapshotUpdateSizeRequests = @@ -121,6 +128,9 @@ public void testValidateAndUpdateCache() throws IOException { omMetadataManager.getStore().commitBatchOperation(batchOperation); } + assertEquals(initialSnapshotSetPropertyCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount, omMetrics.getNumSnapshotSetPropertyFails()); // Check if the exclusive size is set. try (TableIterator> iterator = omMetadataManager.getSnapshotInfoTable().iterator()) { @@ -135,6 +145,42 @@ public void testValidateAndUpdateCache() throws IOException { } } + /** + * This test is mainly to validate metrics and error code. + */ + @Test + public void testValidateAndUpdateCacheFailure() throws IOException { + long initialSnapshotSetPropertyCount = omMetrics.getNumSnapshotSetProperties(); + long initialSnapshotSetPropertyFailCount = omMetrics.getNumSnapshotSetPropertyFails(); + + createSnapshotDataForTest(); + assertFalse(omMetadataManager.getSnapshotInfoTable().isEmpty()); + List snapshotUpdateSizeRequests = createSnapshotUpdateSizeRequest(); + + OmMetadataManagerImpl mockedMetadataManager = mock(OmMetadataManagerImpl.class); + Table mockedSnapshotInfoTable = mock(Table.class); + + when(mockedSnapshotInfoTable.get(anyString())).thenThrow(new IOException("Injected fault error.")); + when(mockedMetadataManager.getSnapshotInfoTable()).thenReturn(mockedSnapshotInfoTable); + when(ozoneManager.getMetadataManager()).thenReturn(mockedMetadataManager); + + for (OMRequest omRequest: snapshotUpdateSizeRequests) { + OMSnapshotSetPropertyRequest omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(omRequest); + OMRequest modifiedOmRequest = omSnapshotSetPropertyRequest.preExecute(ozoneManager); + omSnapshotSetPropertyRequest = new OMSnapshotSetPropertyRequest(modifiedOmRequest); + + // Validate and Update Cache + OMSnapshotSetPropertyResponse omSnapshotSetPropertyResponse = (OMSnapshotSetPropertyResponse) + omSnapshotSetPropertyRequest.validateAndUpdateCache(ozoneManager, 200L); + + assertEquals(INTERNAL_ERROR, omSnapshotSetPropertyResponse.getOMResponse().getStatus()); + } + + assertEquals(initialSnapshotSetPropertyCount, omMetrics.getNumSnapshotSetProperties()); + assertEquals(initialSnapshotSetPropertyFailCount + snapshotUpdateSizeRequests.size(), + omMetrics.getNumSnapshotSetPropertyFails()); + } + private void assertCacheValues(String dbKey) { CacheValue cacheValue = omMetadataManager .getSnapshotInfoTable() From 22198332e6e15b9de23105f698d28ddc61b59e4c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 9 Apr 2024 11:39:21 +0200 Subject: [PATCH 08/17] HDDS-10669. 
Bump axios to 0.28.0 (#6242) (cherry picked from commit 0f43dbcee788cd4d95ecb1634b092f830a7e0704) --- .../webapps/recon/ozone-recon-web/package.json | 2 +- .../webapps/recon/ozone-recon-web/pnpm-lock.yaml | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 3d1528fccb3e..66e26959caff 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -16,7 +16,7 @@ "ag-charts-community": "^7.3.0", "ag-charts-react": "^7.3.0", "antd": "^3.26.20", - "axios": "^0.27.2", + "axios": "^0.28.0", "babel-jest": "^24.9.0", "babel-plugin-import": "^1.13.8", "classnames": "^2.3.2", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 1dab3d583519..be53b06fb50d 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -17,7 +17,7 @@ specifiers: ag-charts-community: ^7.3.0 ag-charts-react: ^7.3.0 antd: ^3.26.20 - axios: ^0.27.2 + axios: ^0.28.0 babel-jest: ^24.9.0 babel-plugin-import: ^1.13.8 classnames: ^2.3.2 @@ -61,7 +61,7 @@ dependencies: ag-charts-community: 7.3.0 ag-charts-react: 7.3.0_4uflhkpzmxcxyxkuqg2ofty3gq antd: 3.26.20_wcqkhtmu7mswc6yz4uyexck3ty - axios: 0.27.2 + axios: 0.28.0 babel-jest: 24.9.0_@babel+core@7.22.11 babel-plugin-import: 1.13.8 classnames: 2.3.2 @@ -3891,11 +3891,12 @@ packages: /aws4/1.12.0: resolution: {integrity: sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg==} - /axios/0.27.2: - resolution: {integrity: sha512-t+yRIyySRTp/wua5xEr+z1q60QmLq8ABsS5O9Me1AsE5dfKqgnCFzwiCZZ/cGNd1lq4/7akDWMxdhVlucjmnOQ==} + /axios/0.28.0: + resolution: {integrity: sha512-Tu7NYoGY4Yoc7I+Npf9HhUMtEEpV7ZiLH9yndTCoNhcpBH0kwcvFbzYN9/u5QKI5A6uefjsNNWaz5olJVYS62Q==} dependencies: follow-redirects: 1.15.6 form-data: 4.0.0 + proxy-from-env: 1.1.0 transitivePeerDependencies: - debug dev: false @@ -13291,6 +13292,10 @@ packages: forwarded: 0.2.0 ipaddr.js: 1.9.1 + /proxy-from-env/1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: false + /prr/1.0.1: resolution: {integrity: sha512-yPw4Sng1gWghHQWj0B3ZggWUm4qVbPwPFcRG8KyxiU7J2OHFSoEHKS+EZ3fv5l1t9CyCiop6l/ZYeWbrgoQejw==} dev: false From 542adc7b3e58b4db2e11fdefb5d6ba94041a6530 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 06:19:16 +0200 Subject: [PATCH 09/17] HDDS-10672. 
Bump express to 4.19.2 (#6442) (cherry picked from commit 06c0d81af229d66cbb0464e74dda67b73095d106) --- .../recon/ozone-recon-web/pnpm-lock.yaml | 66 +++++-------------- 1 file changed, 18 insertions(+), 48 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index be53b06fb50d..ab894fc23288 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -4260,8 +4260,8 @@ packages: /bn.js/5.2.1: resolution: {integrity: sha512-eXRvHzWyYPBuB4NBy0cmYQjGitUrtqwbvlzP3G6VFnNRbsZQIxQ10PbKKHt8gZ/HW/D/747aDl+QkDqg3KQLMQ==} - /body-parser/1.20.1: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} + /body-parser/1.20.2: + resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 @@ -4273,40 +4273,20 @@ packages: iconv-lite: 0.4.24 on-finished: 2.4.1 qs: 6.11.0 - raw-body: 2.5.1 + raw-body: 2.5.2 type-is: 1.6.18 unpipe: 1.0.0 transitivePeerDependencies: - supports-color dev: true - /body-parser/1.20.1_supports-color@6.1.0: - resolution: {integrity: sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9_supports-color@6.1.0 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.11.0 - raw-body: 2.5.1 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - dev: false - - /body-parser/1.20.2: + /body-parser/1.20.2_supports-color@6.1.0: resolution: {integrity: sha512-ml9pReCu3M61kGlqoTm2umSXTlRTuGTx0bfYj+uIUKKYycG5NtSbeetV3faSU6R7ajOPw0g/J1PvK4qNy7s5bA==} engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} dependencies: bytes: 3.1.2 content-type: 1.0.5 - debug: 2.6.9 + debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 destroy: 1.2.0 http-errors: 2.0.0 @@ -4318,7 +4298,7 @@ packages: unpipe: 1.0.0 transitivePeerDependencies: - supports-color - dev: true + dev: false /bonjour/3.5.0: resolution: {integrity: sha512-RaVTblr+OnEli0r/ud8InrU7D+G0y6aJhlxaLa6Pwty4+xoxboF1BsUI45tujvRpbj9dQVoglChqonGAsjEBYg==} @@ -5255,8 +5235,8 @@ packages: /cookie-signature/1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} - /cookie/0.5.0: - resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + /cookie/0.6.0: + resolution: {integrity: sha512-U71cyTamuh1CRNCfpGY6to28lxvNwPG4Guz/EVjgf3Jmzv0vlDp1atT9eS5dDjMYHucpHbWns6Lwf3BKz6svdw==} engines: {node: '>= 0.6'} /copy-anything/2.0.6: @@ -7346,16 +7326,16 @@ packages: - supports-color dev: true - /express/4.18.2: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1 + body-parser: 1.20.2 content-disposition: 0.5.4 content-type: 
1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9 depd: 2.0.0 @@ -7385,16 +7365,16 @@ packages: - supports-color dev: true - /express/4.18.2_supports-color@6.1.0: - resolution: {integrity: sha512-5/PsL6iGPdfQ/lKM1UuielYgv3BUoJfz1aUwU9vHZ+J7gyvwdQXFEBIEIaxeGf0GIcreATNyBExtalisDbuMqQ==} + /express/4.19.2_supports-color@6.1.0: + resolution: {integrity: sha512-5T6nhjsT+EOMzuck8JjBHARTHfMht0POzlA60WV2pMD3gyXw2LZnZ+ueGdNxG+0calOJcWKbpFcuzLZ91YWq9Q==} engines: {node: '>= 0.10.0'} dependencies: accepts: 1.3.8 array-flatten: 1.1.1 - body-parser: 1.20.1_supports-color@6.1.0 + body-parser: 1.20.2_supports-color@6.1.0 content-disposition: 0.5.4 content-type: 1.0.5 - cookie: 0.5.0 + cookie: 0.6.0 cookie-signature: 1.0.6 debug: 2.6.9_supports-color@6.1.0 depd: 2.0.0 @@ -10452,7 +10432,7 @@ packages: connect-pause: 0.1.1 cors: 2.8.5 errorhandler: 1.5.1 - express: 4.18.2 + express: 4.19.2 express-urlrewrite: 1.4.0 json-parse-helpfulerror: 1.0.3 lodash: 4.17.21 @@ -13431,15 +13411,6 @@ packages: big-rat: 1.0.4 dev: false - /raw-body/2.5.1: - resolution: {integrity: sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig==} - engines: {node: '>= 0.8'} - dependencies: - bytes: 3.1.2 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - unpipe: 1.0.0 - /raw-body/2.5.2: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} @@ -13448,7 +13419,6 @@ packages: http-errors: 2.0.0 iconv-lite: 0.4.24 unpipe: 1.0.0 - dev: true /rc-align/2.4.5: resolution: {integrity: sha512-nv9wYUYdfyfK+qskThf4BQUSIadeI/dCsfaMZfNEoxm9HwOIioQ+LyqmMK6jWHAZQgOzMLaqawhuBXlF63vgjw==} @@ -16832,7 +16802,7 @@ packages: connect-history-api-fallback: 1.6.0 debug: 4.3.4_supports-color@6.1.0 del: 4.1.1 - express: 4.18.2_supports-color@6.1.0 + express: 4.19.2_supports-color@6.1.0 html-entities: 1.4.0 http-proxy-middleware: 0.19.1_tmpgdztspuwvsxzgjkhoqk7duq import-local: 2.0.0 From 70abad075c580684a4fb4abc8f1ca9e4499023a8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 09:23:46 +0200 Subject: [PATCH 10/17] HDDS-10673. 
Bump es5-ext to 0.10.64 (#6284) (cherry picked from commit 2aa77f70a36b8ff0c816285ecb0a9d2e0404f5ac) --- .../recon/ozone-recon-web/pnpm-lock.yaml | 28 +++++++++++++++---- 1 file changed, 23 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index ab894fc23288..f66fd2acab13 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -5726,7 +5726,7 @@ packages: /d/1.0.1: resolution: {integrity: sha512-m62ShEObQ39CfralilEQRjH6oAMtNCV1xJyEx5LpRYUVN+EviphDgUc/F3hnYbADmkiNs67Y+3ylmlG7Lnu+FA==} dependencies: - es5-ext: 0.10.62 + es5-ext: 0.10.64 type: 1.2.0 dev: false @@ -6507,13 +6507,14 @@ packages: is-date-object: 1.0.5 is-symbol: 1.0.4 - /es5-ext/0.10.62: - resolution: {integrity: sha512-BHLqn0klhEpnOKSrzn/Xsz2UIW8j+cGmo9JLzr8BiUapV8hPL9+FliFqjwr9ngW7jWdnxv6eO+/LqyhJVqgrjA==} + /es5-ext/0.10.64: + resolution: {integrity: sha512-p2snDhiLaXe6dahss1LddxqEm+SkuDvV8dnIQG0MWjyHpcMNfXKPE+/Cc0y+PhxJX3A4xGNeFCj5oc0BUh6deg==} engines: {node: '>=0.10'} requiresBuild: true dependencies: es6-iterator: 2.0.3 es6-symbol: 3.1.3 + esniff: 2.0.1 next-tick: 1.1.0 dev: false @@ -6521,7 +6522,7 @@ packages: resolution: {integrity: sha512-zw4SRzoUkd+cl+ZoE15A9o1oQd920Bb0iOJMQkQhl3jNc03YqVjAhG7scf9C5KWRU/R13Orf588uCC6525o02g==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-symbol: 3.1.3 dev: false @@ -6540,7 +6541,7 @@ packages: resolution: {integrity: sha512-p5um32HOTO1kP+w7PRnB+5lQ43Z6muuMuIMffvDN8ZB4GcnjLBV6zGStpbASIMk4DCAvEaamhe2zhyCb/QXXsA==} dependencies: d: 1.0.1 - es5-ext: 0.10.62 + es5-ext: 0.10.64 es6-iterator: 2.0.3 es6-symbol: 3.1.3 dev: false @@ -7162,6 +7163,16 @@ packages: transitivePeerDependencies: - supports-color + /esniff/2.0.1: + resolution: {integrity: sha512-kTUIGKQ/mDPFoJ0oVfcmyJn4iBDRptjNVIzwIFR7tqWXdVI9xfA2RMwY/gbSpJG3lkdWNEjLap/NqVHZiJsdfg==} + engines: {node: '>=0.10'} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + event-emitter: 0.3.5 + type: 2.7.2 + dev: false + /espree/6.2.1: resolution: {integrity: sha512-ysCxRQY3WaXJz9tdbWOwuWr5Y/XrPTGX9Kiz3yoUXwW0VZ4w30HTkQLaGx/+ttFjF8i+ACbArnB4ce68a9m5hw==} engines: {node: '>=6.0.0'} @@ -7216,6 +7227,13 @@ packages: resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} engines: {node: '>= 0.6'} + /event-emitter/0.3.5: + resolution: {integrity: sha512-D9rRn9y7kLPnJ+hMq7S/nhvoKwwvVJahBi2BPmx3bvbsEdK3W9ii8cBSGjP+72/LnM4n6fo3+dkCX5FeTQruXA==} + dependencies: + d: 1.0.1 + es5-ext: 0.10.64 + dev: false + /eventemitter3/4.0.7: resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} dev: false From 4e78ea87c9f375009ee8e10931ba58581322a860 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 10:50:32 +0200 Subject: [PATCH 11/17] HDDS-10674. 
Bump ip to 1.1.9 (#6243) (cherry picked from commit 32e6a31baa5bbd192642f2cf7b9a77b711233494) --- .../webapps/recon/ozone-recon-web/pnpm-lock.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index f66fd2acab13..452b2e415642 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -6091,7 +6091,7 @@ packages: /dns-packet/1.3.4: resolution: {integrity: sha512-BQ6F4vycLXBvdrJZ6S3gZewt6rcrks9KBgM9vrhW+knGRqc8uEdT7fuCwloc7nny5xNoMJ17HGH0R/6fpo8ECA==} dependencies: - ip: 1.1.8 + ip: 1.1.9 safe-buffer: 5.2.1 dev: false @@ -9260,8 +9260,8 @@ packages: engines: {node: '>=4'} dev: false - /ip/1.1.8: - resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==} + /ip/1.1.9: + resolution: {integrity: sha512-cyRxvOEpNHNtchU3Ln9KC/auJgup87llfQpQ+t5ghoC/UhL16SWzbueiCsdTnWmqAWl7LadfuwhlqmtOaqMHdQ==} dev: false /ipaddr.js/1.9.1: @@ -16825,7 +16825,7 @@ packages: http-proxy-middleware: 0.19.1_tmpgdztspuwvsxzgjkhoqk7duq import-local: 2.0.0 internal-ip: 4.3.0 - ip: 1.1.8 + ip: 1.1.9 is-absolute-url: 3.0.3 killable: 1.0.1 loglevel: 1.8.1 From bd72ade07f1666326be3fbaeea3b820028850462 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 16:21:28 +0200 Subject: [PATCH 12/17] HDDS-10676. Bump browserify-sign to 4.2.3 (#6509) (cherry picked from commit 14f2452b747ace704edc21eb6b82e41687de35bf) --- .../recon/ozone-recon-web/pnpm-lock.yaml | 50 ++++++++++++++++--- 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 452b2e415642..4bea4bab49ee 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -3792,6 +3792,13 @@ packages: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} dev: false + /asn1.js/4.10.1: + resolution: {integrity: sha512-p32cOF5q0Zqs9uBiONKYLm6BClCoBCM5O9JfeUSlnQLBTxYdTK+pW+nXflm8UkKd2UYlEbYz5qEi0JuZR9ckSw==} + dependencies: + bn.js: 4.12.0 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + /asn1.js/5.4.1: resolution: {integrity: sha512-+I//4cYPccV8LdmBLiX8CYvf9Sp3vQsrqu2QNXRcrbiWvcx/UdlFiqUJJzxRQxgsZmvhXhn4cSKeSmoFjVdupA==} dependencies: @@ -4445,17 +4452,19 @@ packages: bn.js: 5.2.1 randombytes: 2.1.0 - /browserify-sign/4.2.1: - resolution: {integrity: sha512-/vrA5fguVAKKAVTNJjgSm1tRQDHUU6DbwO9IROu/0WAzC8PKhucDSh18J0RMvVeHAn5puMd+QHC2erPRNf8lmg==} + /browserify-sign/4.2.3: + resolution: {integrity: sha512-JWCZW6SKhfhjJxO8Tyiiy+XYB7cqd2S5/+WeYHsKdNKFlCBhKbblba1A/HN/90YwtxKc8tCErjffZl++UNmGiw==} + engines: {node: '>= 0.12'} dependencies: bn.js: 5.2.1 browserify-rsa: 4.1.0 create-hash: 1.2.0 create-hmac: 1.1.7 - elliptic: 6.5.4 + elliptic: 6.5.5 + hash-base: 3.0.4 inherits: 2.0.4 - parse-asn1: 5.1.6 - readable-stream: 3.6.2 + parse-asn1: 5.1.7 + readable-stream: 2.3.8 safe-buffer: 5.2.1 /browserify-zlib/0.2.0: @@ -5402,7 +5411,7 @@ packages: resolution: {integrity: 
sha512-fz4spIh+znjO2VjL+IdhEpRJ3YN6sMzITSBijk6FK2UvTqruSQW+/cCZTSNsMiZNvUeq0CqurF+dAbyiGOY6Wg==} dependencies: browserify-cipher: 1.0.1 - browserify-sign: 4.2.1 + browserify-sign: 4.2.3 create-ecdh: 4.0.4 create-hash: 1.2.0 create-hmac: 1.1.7 @@ -6330,6 +6339,17 @@ packages: minimalistic-assert: 1.0.1 minimalistic-crypto-utils: 1.0.1 + /elliptic/6.5.5: + resolution: {integrity: sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw==} + dependencies: + bn.js: 4.12.0 + brorand: 1.1.0 + hash.js: 1.1.7 + hmac-drbg: 1.0.1 + inherits: 2.0.4 + minimalistic-assert: 1.0.1 + minimalistic-crypto-utils: 1.0.1 + /emoji-regex/7.0.3: resolution: {integrity: sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==} @@ -8807,6 +8827,13 @@ packages: dependencies: function-bind: 1.1.1 + /hash-base/3.0.4: + resolution: {integrity: sha512-EeeoJKjTyt868liAlVmcv2ZsUfGHlE3Q+BICOXcZiwN3osr5Q/zFGYmTJpoIzuaSTAwndFy+GqhEwlU4L3j4Ow==} + engines: {node: '>=4'} + dependencies: + inherits: 2.0.4 + safe-buffer: 5.2.1 + /hash-base/3.1.0: resolution: {integrity: sha512-1nmYp/rhMDiE7AYkDw+lLwlAzz0AntGIe51F3RfFfEqyQ3feY2eI/NcwC6umIQVOASPMsWJLJScWKSSvzL9IVA==} engines: {node: '>=4'} @@ -12099,6 +12126,17 @@ packages: pbkdf2: 3.1.2 safe-buffer: 5.2.1 + /parse-asn1/5.1.7: + resolution: {integrity: sha512-CTM5kuWR3sx9IFamcl5ErfPl6ea/N8IYwiJ+vpeB2g+1iknv7zBl5uPwbMbRVznRVbrNY6lGuDoE5b30grmbqg==} + engines: {node: '>= 0.10'} + dependencies: + asn1.js: 4.10.1 + browserify-aes: 1.2.0 + evp_bytestokey: 1.0.3 + hash-base: 3.0.4 + pbkdf2: 3.1.2 + safe-buffer: 5.2.1 + /parse-json/2.2.0: resolution: {integrity: sha512-QR/GGaKCkhwk1ePQNYDRKYZ3mwU9ypsKhB0XyFnLQdomyEqk3e8wpW3V5Jp88zbxK4n5ST1nqo+g9juTpownhQ==} engines: {node: '>=0.10.0'} From 487d40fed53f88af6435a68814641a2288657bcb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 10 Apr 2024 19:25:45 +0200 Subject: [PATCH 13/17] HDDS-10677. 
Bump plotly.js to 2.25.2 (#5915) (cherry picked from commit 4aee5624072a7b09882efb8e809fe3f1fdd34be1) --- .../recon/ozone-recon-web/package.json | 2 +- .../recon/ozone-recon-web/pnpm-lock.yaml | 1092 ++--------------- 2 files changed, 99 insertions(+), 995 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index 66e26959caff..41987c00ef35 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -25,7 +25,7 @@ "less": "^3.13.1", "less-loader": "^5.0.0", "moment": "^2.29.4", - "plotly.js": "^1.58.5", + "plotly.js": "^2.25.2", "pretty-ms": "^5.1.0", "react": "^16.8.6", "react-app-rewired": "^2.2.1", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 4bea4bab49ee..957a0ed5d152 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -35,7 +35,7 @@ specifiers: less-loader: ^5.0.0 moment: ^2.29.4 npm-run-all: ^4.1.5 - plotly.js: ^1.58.5 + plotly.js: ^2.25.2 pretty-ms: ^5.1.0 react: ^16.8.6 react-app-rewired: ^2.2.1 @@ -70,12 +70,12 @@ dependencies: less: 3.13.1 less-loader: 5.0.0_less@3.13.1 moment: 2.29.4 - plotly.js: 1.58.5 + plotly.js: 2.25.2 pretty-ms: 5.1.0 react: 16.14.0 react-app-rewired: 2.2.1_react-scripts@3.4.4 react-dom: 16.14.0_react@16.14.0 - react-plotly.js: 2.6.0_f6dluzp62qf57yw3gl4ocsg3e4 + react-plotly.js: 2.6.0_qtjenpcawcnnxnr626ndcvhi4u react-router: 5.3.4_react@16.14.0 react-router-dom: 5.3.4_react@16.14.0 react-scripts: 3.4.4_bo7u2dcgnntwwyyxmecoaqdaee @@ -100,14 +100,6 @@ devDependencies: packages: - /3d-view/2.0.1: - resolution: {integrity: sha512-YSLRHXNpSziaaiK2R0pI5+JKguoJVbtWmIv9YyBFtl0+q42kQwJB/JUulbFR/1zYFm58ifjKQ6kVdgZ6tyKtCA==} - dependencies: - matrix-camera-controller: 2.1.4 - orbit-camera-controller: 4.0.0 - turntable-camera-controller: 3.0.1 - dev: false - /@ampproject/remapping/2.2.1: resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} engines: {node: '>=6.0.0'} @@ -239,7 +231,7 @@ packages: gensync: 1.0.0-beta.2 json5: 2.2.3 lodash: 4.17.21 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 source-map: 0.5.7 transitivePeerDependencies: @@ -351,7 +343,7 @@ packages: '@babel/helper-plugin-utils': 7.22.5 debug: 4.3.4 lodash.debounce: 4.0.8 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -1842,7 +1834,7 @@ packages: '@babel/core': 7.9.0 '@babel/helper-module-imports': 7.22.5 '@babel/helper-plugin-utils': 7.22.5 - resolve: 1.15.0 + resolve: 1.22.4 semver: 5.7.2 dev: false @@ -2712,6 +2704,10 @@ packages: d3-shape: 1.3.7 dev: false + /@plotly/d3/3.8.1: + resolution: {integrity: sha512-x49ThEu1FRA00kTso4Jdfyf2byaCPLBGmLjAYQz5OzaPyLUhHesX3/Nfv2OHEhynhdy2UB39DLXq6thYe2L2kg==} + dev: false + /@plotly/point-cluster/3.1.9: resolution: {integrity: sha512-MwaI6g9scKf68Orpr1pHZ597pYx9uP8UEFXLPbsCmuw3a84obwz6pnMXGc90VhgDNeNiLEdlmuK7CPo+5PIxXw==} dependencies: @@ -2727,6 +2723,10 @@ packages: pick-by-alias: 1.2.0 dev: false + /@plotly/regl/2.1.2: + resolution: {integrity: 
sha512-Mdk+vUACbQvjd0m/1JJjOOafmkp/EpmHjISsopEz5Av44CBq7rPC05HHNbYGKVyNUF2zmEoBS/TT0pd0SPFFyw==} + dev: false + /@sinclair/typebox/0.27.8: resolution: {integrity: sha512-+Fj43pSMwJs4KRrH/938Uf+uAELIgVBmQzg/q1YG10djyfA3TnrU8N8XzqCh/okZdszqBQTZf96idMfE5lnwTA==} dev: false @@ -3290,14 +3290,6 @@ packages: resolution: {integrity: sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==} dev: false - /a-big-triangle/1.0.3: - resolution: {integrity: sha512-AboEtoSPueZisde3Vr+7VRSfUIWBSGZUOtW3bJrOZXgIyK7dNNDdpDmOKJjg5GmJLlRKUONWV8lMgTK8MBhQWw==} - dependencies: - gl-buffer: 2.1.2 - gl-vao: 1.3.0 - weak-map: 1.0.8 - dev: false - /abab/2.0.6: resolution: {integrity: sha512-j2afSsaIENvHZN2B8GOpF566vZ5WVk5opAiMTvWgaQT8DkbOqsTfvNAvHoRGU2zzP8cPoqys+xHTRDWW8L+/BA==} dev: false @@ -3361,12 +3353,6 @@ packages: object-assign: 4.1.1 dev: false - /add-line-numbers/1.0.1: - resolution: {integrity: sha512-w+2a1malCvWwACQFBpZ5/uwmHGaGYT+aGIxA8ONF5vlhe6X/gD3eR8qVoLWa+5nnWAOq2LuPbrqDYqj1pn0WMg==} - dependencies: - pad-left: 1.0.2 - dev: false - /address/1.1.2: resolution: {integrity: sha512-aT6camzM4xEA54YVJYSqxz1kv4IHnQZRtThJJHhUMRExaU5spC7jX5ugSwTaTgJliIgs4VhZOk7htClvQ/LmRA==} engines: {node: '>= 0.12.0'} @@ -3380,12 +3366,6 @@ packages: regex-parser: 2.2.11 dev: false - /affine-hull/1.0.0: - resolution: {integrity: sha512-3QNG6+vFAwJvSZHsJYDJ/mt1Cxx9n5ffA+1Ohmj7udw0JuRgUVIXK0P9N9pCMuEdS3jCNt8GFX5q2fChq+GO3Q==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /ag-charts-community/7.3.0: resolution: {integrity: sha512-118U6YsCMia6iZHaN06zT19rr2SYa92WB73pMVCKQlp2H3c19uKQ6Y6DfKG/nIfNUzFXZLHBwKIdZXsMWJdZww==} dev: false @@ -3439,20 +3419,6 @@ packages: resolution: {integrity: sha512-0V/PkoculFl5+0Lp47JoxUcO0xSxhIBvm+BxHdD/OgXNmdRpRHCFnKVuUoWyS9EzQP+otSGv0m9Lb4yVkQBn2A==} dev: false - /alpha-complex/1.0.0: - resolution: {integrity: sha512-rhsjKfc9tMF5QZc0NhKz/zFzMu2rvHxCP/PyJtEmMkV7M848YjIoQGDlNGp+vTqxXjA8wAY2OxgR1K54C2Awkg==} - dependencies: - circumradius: 1.0.0 - delaunay-triangulate: 1.1.6 - dev: false - - /alpha-shape/1.0.0: - resolution: {integrity: sha512-/V+fmmjtSA2yfQNq8iEqBxnPbjcOMXpM9Ny+yE/O7aLR7Q1oPzUc9bHH0fPHS3hUugUL/dHzTis6l3JirYOS/w==} - dependencies: - alpha-complex: 1.0.0 - simplicial-complex-boundary: 1.0.1 - dev: false - /alphanum-sort/1.0.2: resolution: {integrity: sha512-0FcBfdcmaumGPQ0qPn7Q5qTgz/ooXgIyp1rf8ik5bGX8mpE2YHjC0P/eyQvxu1GURYQgq9ozf2mteQ5ZD9YiyQ==} dev: false @@ -3866,10 +3832,6 @@ packages: engines: {node: '>= 4.0.0'} dev: true - /atob-lite/1.0.0: - resolution: {integrity: sha512-ArXcmHR/vwSN37HLVap/Y5SKpz12CuEybxe1sIYl7th/S6SQPrVMNFt6rblJzCOAxn0SHbXpknUtqbAIeo3Aow==} - dev: false - /atob/2.1.2: resolution: {integrity: sha512-Wm6ukoaOGJi/73p/cl2GvLjTI5JM1k/O14isD73YML8StrH/7/lRFgmg8nICZgD3bZZvjwCGxtMOD3wWNAu8cg==} engines: {node: '>= 4.5.0'} @@ -3933,7 +3895,7 @@ packages: '@babel/types': 7.22.11 eslint: 6.8.0 eslint-visitor-keys: 1.3.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - supports-color dev: false @@ -4042,7 +4004,7 @@ packages: dependencies: '@babel/runtime': 7.9.0 cosmiconfig: 6.0.0 - resolve: 1.15.0 + resolve: 1.22.4 dev: false /babel-plugin-named-asset-import/0.3.8_@babel+core@7.9.0: @@ -4167,12 +4129,6 @@ packages: /balanced-match/1.0.2: resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} - /barycentric/1.0.1: - resolution: {integrity: sha512-47BuWXsenBbox4q1zqJrUoxq1oM1ysrYc5mdBACAwaP+CL+tcNauC3ybA0lzbIWzJCLZYMqebAx46EauTI2Nrg==} - 
dependencies: - robust-linear-solve: 1.0.0 - dev: false - /base/0.11.2: resolution: {integrity: sha512-5T6P4xPgpp0YDFvSWwEZ4NoE3aM4QBQXDzmVbraCkFj8zHM+mba8SyqB5DbZWyR7mYHo6Y7BdQo3MoA4m0TeQg==} engines: {node: '>=0.10.0'} @@ -4204,14 +4160,6 @@ packages: dependencies: tweetnacl: 0.14.5 - /big-rat/1.0.4: - resolution: {integrity: sha512-AubEohDDrak6urvKkFMIlwPWyQbJ/eq04YsK/SNipH7NNiPCYchjQNvWYK5vyyMmtGXAmNmsAjIcfkaDuTtd8g==} - dependencies: - bit-twiddle: 1.0.2 - bn.js: 4.12.0 - double-bits: 1.1.1 - dev: false - /big.js/5.2.2: resolution: {integrity: sha512-vyL2OymJxmarO8gxMr0mhChsO9QGwhynfuu4+MHTAW6czfq9humCB7rKpUjDd9YUiDPU4mzpyupFSvOClAwbmQ==} dev: false @@ -4238,10 +4186,6 @@ packages: dev: false optional: true - /bit-twiddle/0.0.2: - resolution: {integrity: sha512-76iFAOrkcuw5UPA30Pt32XaytMHXz/04JembgIwsQAp7ImHYSWNq1shBbrlWf6CUvh1+amQ81LI8hNhqQgsBEw==} - dev: false - /bit-twiddle/1.0.2: resolution: {integrity: sha512-B9UhK0DKFZhoTFcfvAzhqsjStvGJp9vYWf3+6SNTtdSQnvIgfkHbgHrg/e4+TH71N2GDu8tpmCVoyfrL1d7ntA==} dev: false @@ -4322,17 +4266,6 @@ packages: resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} dev: false - /boundary-cells/2.0.2: - resolution: {integrity: sha512-/S48oUFYEgZMNvdqC87iYRbLBAPHYijPRNrNpm/sS8u7ijIViKm/hrV3YD4sx/W68AsG5zLMyBEditVHApHU5w==} - dev: false - - /box-intersect/1.0.2: - resolution: {integrity: sha512-yJeMwlmFPG1gIa7Rs/cGXeI6iOj6Qz5MG5PE61xLKpElUGzmJ4abm+qsLpzxKJFpsSDq742BQEocr8dI2t8Nxw==} - dependencies: - bit-twiddle: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /boxen/3.2.0: resolution: {integrity: sha512-cU4J/+NodM3IHdSL2yN8bqYqnmlBTidDR4RC7nJs61ZmtGz8VZzM3HLQX0zY5mrSmPtR3xWwsq2jOUQqFZN8+A==} engines: {node: '>=6'} @@ -4697,18 +4630,6 @@ packages: /caseless/0.12.0: resolution: {integrity: sha512-4tYFyifaFfGacoiObjJegolkwSU4xQNGbVgUiNYVUxbQ2x2lUsFvY4hVgVzGiIe6WLOPqycWXA40l+PWsxthUw==} - /cdt2d/1.0.0: - resolution: {integrity: sha512-pFKb7gVhpsI6onS5HUXRoqbBIJB4CJ+KPk8kgaIVcm0zFgOxIyBT5vzifZ4j1aoGVJS0U1A+S4oFDshuLAitlA==} - dependencies: - binary-search-bounds: 2.0.5 - robust-in-sphere: 1.2.1 - robust-orientation: 1.2.1 - dev: false - - /cell-orientation/1.0.1: - resolution: {integrity: sha512-DtEsrgP+donmPxpEZm7hK8zCPYDXAQ977ecJiE7G0gbTfnS6TZVBlief3IdRP/TZS1PVnJRGJTDdjSdV8mRDug==} - dev: false - /chalk/1.1.3: resolution: {integrity: sha512-U3lRVLMSlsCfjqYPbLyVv11M9CPW4I728d6TCKMAOJueEeB9/8o+eSsMnxPJD+Q+K909sdESg7C+tIkoH6on1A==} engines: {node: '>=0.10.0'} @@ -4822,19 +4743,6 @@ packages: inherits: 2.0.4 safe-buffer: 5.2.1 - /circumcenter/1.0.0: - resolution: {integrity: sha512-YRw0mvttcISviaOtSmaHb2G3ZVbkxzYPQeAEd57/CFFtmOkwfRTw9XuxYZ7PCi2BYa0NajjHV6bq4nbY1VCC8g==} - dependencies: - dup: 1.0.0 - robust-linear-solve: 1.0.0 - dev: false - - /circumradius/1.0.0: - resolution: {integrity: sha512-5ltoQvWQzJiZjCVX9PBKgKt+nsuzOLKayqXMNllfRSqIp2L5jFpdanv1V6j27Ue7ACxlzmamlR+jnLy+NTTVTw==} - dependencies: - circumcenter: 1.0.0 - dev: false - /clamp/1.0.1: resolution: {integrity: sha512-kgMuFyE78OC6Dyu3Dy7vcx4uy97EIbVxJB/B0eJ3bUNAkwdNcxYzgKltnyADiYwsR7SEqkkUPsEUT//OVS6XMA==} dev: false @@ -4863,18 +4771,6 @@ packages: source-map: 0.6.1 dev: false - /clean-pslg/1.1.2: - resolution: {integrity: sha512-bJnEUR6gRiiNi2n4WSC6yrc0Hhn/oQDOTzs6evZfPwEF/VKVXM6xu0F4n/WSBz7TjTt/ZK6I5snRM9gVKMVAxA==} - dependencies: - big-rat: 1.0.4 - box-intersect: 1.0.2 - nextafter: 1.0.0 - rat-vec: 1.1.1 - robust-segment-intersect: 1.0.1 - union-find: 1.0.2 - uniq: 1.0.1 - dev: false - /clean-regexp/1.0.0: resolution: 
{integrity: sha512-GfisEZEJvzKrmGWkvfhgzcz/BllN1USeqD2V6tg14OAOgaCD2Z/PUEuxnAZ/nPvmaHRG7a8y77p1T/IRQ4D1Hw==} engines: {node: '>=4'} @@ -5039,12 +4935,6 @@ packages: color-string: 1.9.1 dev: false - /colormap/2.3.2: - resolution: {integrity: sha512-jDOjaoEEmA9AgA11B/jCSAvYE95r3wRoAyTf3LEHGiUVlNHJaL1mRkf5AyLSpQBVGfTEPwGEqCIzL+kgr2WgNA==} - dependencies: - lerp: 1.0.3 - dev: false - /combined-stream/1.0.8: resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} engines: {node: '>= 0.8'} @@ -5068,27 +4958,6 @@ packages: /commondir/1.0.1: resolution: {integrity: sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==} - /compare-angle/1.0.1: - resolution: {integrity: sha512-adM1/bpLFQFquh0/Qr5aiOPuztoga/lCf2Z45s+Oydgzf18F3wBSkdHmcHMeig0bD+dDKlz52u1rLOAOqiyE5A==} - dependencies: - robust-orientation: 1.2.1 - robust-product: 1.0.0 - robust-sum: 1.0.0 - signum: 0.0.0 - two-sum: 1.0.0 - dev: false - - /compare-cell/1.0.0: - resolution: {integrity: sha512-uNIkjiNLZLhdCgouF39J+W04R7oP1vwrNME4vP2b2/bAa6PHOj+h8yXu52uPjPTKs5RatvqNsDVwEN7Yp19vNA==} - dev: false - - /compare-oriented-cell/1.0.1: - resolution: {integrity: sha512-9D7R2MQfsGGRskZAZF0TkJHt9eFNbFkZyVdVps+WUYxtRHgG77BLbieKgSkj7iEAb9PNDSU9QNa9MtigjQ3ktQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - dev: false - /component-classes/1.2.6: resolution: {integrity: sha512-hPFGULxdwugu1QWW3SvVOCUHLzO34+a2J6Wqy0c5ASQkfi9/8nZcBB0ZohaEbXOQlCflMAEMmEWk7u7BVs4koA==} dependencies: @@ -5233,14 +5102,6 @@ packages: resolution: {integrity: sha512-ASFBup0Mz1uyiIjANan1jzLQami9z1PoYSZCiiYW2FczPbenXc45FZdBZLzOT+r6+iciuEModtmCti+hjaAk0A==} dev: false - /convex-hull/1.0.3: - resolution: {integrity: sha512-24rZAoh81t41GHPLAxcsokgjH9XNoVqU2OiSi8iMHUn6HUURfiefcEWAPt1AfwZjBBWTKadOm1xUcUMnfFukhQ==} - dependencies: - affine-hull: 1.0.0 - incremental-convex-hull: 1.0.1 - monotone-convex-hull-2d: 1.0.1 - dev: false - /cookie-signature/1.0.6: resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} @@ -5705,10 +5566,6 @@ packages: resolution: {integrity: sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==} dev: false - /cubic-hermite/1.0.0: - resolution: {integrity: sha512-DKZ6yLcJiJJgl54mGA4n0uueYB4qdPfOJrQ1HSEZqdKp6D25AAAWVDwpoAxLflOku5a/ALBO77oEIyWcVa+UYg==} - dev: false - /currently-unhandled/0.4.1: resolution: {integrity: sha512-/fITjgjGU50vjQ4FH6eUoYu+iUoUKIXws2hL15JJpIR+BbTxaXQsMuuyjtNh2WqsSBS5nsaZHFsFecyw5CCAng==} engines: {node: '>=0.10.0'} @@ -5722,12 +5579,6 @@ packages: lodash.flow: 3.5.0 dev: false - /cwise-compiler/1.1.3: - resolution: {integrity: sha512-WXlK/m+Di8DMMcCjcWr4i+XzcQra9eCdXIJrgh4TUgh0pIS/yJduLxS9JgefsHJ/YVLdgPtXm9r62W92MvanEQ==} - dependencies: - uniq: 1.0.1 - dev: false - /cyclist/1.0.2: resolution: {integrity: sha512-0sVXIohTfLqVIW3kb/0n6IiWF3Ifj5nm2XaSrLq2DI6fKIGa2fYAZdk917rUneaeLVpYfFcyXE2ft0fe3remsA==} dev: false @@ -5764,12 +5615,33 @@ packages: d3-timer: 1.0.10 dev: false + /d3-format/1.4.5: + resolution: {integrity: sha512-J0piedu6Z8iB6TbIGfZgDzfXxUFN3qQRMofy2oPdXzQibYGqPB/9iMcxr/TGalU+2RsyDO+U4f33id8tbnSRMQ==} + dev: false + + /d3-geo-projection/2.9.0: + resolution: {integrity: sha512-ZULvK/zBn87of5rWAfFMc9mJOipeSo57O+BBitsKIXmU4rTVAnX1kSsJkE0R+TxY8pGNoM1nbyRRE7GYHhdOEQ==} + hasBin: true + dependencies: + commander: 2.20.3 + d3-array: 1.2.4 + d3-geo: 1.12.1 + resolve: 1.22.4 + dev: false + + /d3-geo/1.12.1: 
+ resolution: {integrity: sha512-XG4d1c/UJSEX9NfU02KwBL6BYPj8YKHxgBEw5om2ZnTRSbIcego6dhHwcxuSR3clxh0EpE38os1DVPOmnYtTPg==} + dependencies: + d3-array: 1.2.4 + dev: false + /d3-hierarchy/1.1.9: resolution: {integrity: sha512-j8tPxlqh1srJHAtxfvOUwKNYJkQuBFdM1+JAUfq6xqH5eAqf93L7oG1NVqDa4CpFZNvnNKtCYEUC8KY9yEn9lQ==} dev: false - /d3-interpolate/1.4.0: - resolution: {integrity: sha512-V9znK0zc3jOPV4VD2zZn0sDhZU3WAE2bmlxdIwwQPPzPjvyLkd8B3JUVdS1IDUFDkWZ72c9qnv1GK2ZagTZ8EA==} + /d3-interpolate/3.0.1: + resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} + engines: {node: '>=12'} dependencies: d3-color: 1.4.1 dev: false @@ -5802,10 +5674,6 @@ packages: resolution: {integrity: sha512-B1JDm0XDaQC+uvo4DT79H0XmBskgS3l6Ve+1SBCfxgmtIb1AVrPIoqd+nPSv+loMX8szQ0sVUhGngL7D5QPiXw==} dev: false - /d3/3.5.17: - resolution: {integrity: sha512-yFk/2idb8OHPKkbAL8QaOaqENNoMhIaSHZerk3oQsECwkObkCpJyjYwCe+OHiq6UEdhe1m8ZGARRRO3ljFjlKg==} - dev: false - /damerau-levenshtein/1.0.8: resolution: {integrity: sha512-sdQSFB7+llfUcQHUQO3+B8ERRj0Oa4w9POWMI/puGtuf7gFywGmkaLCElnudfTiKZV+NvHqL0ifzdrI8Ro7ESA==} dev: false @@ -6007,13 +5875,6 @@ packages: rimraf: 2.7.1 dev: false - /delaunay-triangulate/1.1.6: - resolution: {integrity: sha512-mhAclqFCgLoiBIDQDIz2K+puZq6OhYxunXrG2wtTcZS+S1xuzl+H3h0MIOajpES+Z+jfY/rz0wVt3o5iipt1wg==} - dependencies: - incremental-convex-hull: 1.0.1 - uniq: 1.0.1 - dev: false - /delayed-stream/1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} @@ -6245,10 +6106,6 @@ packages: engines: {node: '>=8'} dev: false - /double-bits/1.1.1: - resolution: {integrity: sha512-BCLEIBq0O/DWoA7BsCu/R+RP0ZXiowP8BhtJT3qeuuQEBpnS8LK/Wo6UTJQv6v8mK1fj8n90YziHLwGdM5whSg==} - dev: false - /draft-js/0.10.5_wcqkhtmu7mswc6yz4uyexck3ty: resolution: {integrity: sha512-LE6jSCV9nkPhfVX2ggcRLA4FKs6zWq9ceuO/88BpXdNCS7mjRTgs0NsV6piUCJX9YxMsB9An33wnkMmU2sD2Zg==} peerDependencies: @@ -6305,12 +6162,6 @@ packages: jsbn: 0.1.1 safer-buffer: 2.1.2 - /edges-to-adjacency-list/1.0.0: - resolution: {integrity: sha512-0n0Z+xTLfg96eYXm91PEY4rO4WGxohLWjJ9qD1RI3fzxKU6GHez+6KPajpobR4zeZxp7rSiHjHG5dZPj8Kj58Q==} - dependencies: - uniq: 1.0.1 - dev: false - /ee-first/1.1.1: resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} @@ -6546,10 +6397,6 @@ packages: es6-symbol: 3.1.3 dev: false - /es6-promise/4.2.8: - resolution: {integrity: sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==} - dev: false - /es6-symbol/3.1.3: resolution: {integrity: sha512-NJ6Yn3FuDinBaBRWl/q5X/s4koRHBrgKAu+yGI6JCBeiu3qrcbJhwT2GeR/EXVfylRk8dpQVJoLEFhK+Mu31NA==} dependencies: @@ -6874,7 +6721,7 @@ packages: minimatch: 3.1.2 object.values: 1.1.6 read-pkg-up: 2.0.0 - resolve: 1.15.0 + resolve: 1.22.4 transitivePeerDependencies: - eslint-import-resolver-typescript - eslint-import-resolver-webpack @@ -7503,10 +7350,6 @@ packages: - supports-color dev: false - /extract-frustum-planes/1.0.0: - resolution: {integrity: sha512-GivvxEMgjSNnB3e1mIMBlB5ogPB6XyEjOQRGG0SfYVVLtu1ntLGHLT1ly8+mE819dKBHBwnm9+UBCScjiMgppA==} - dev: false - /extsprintf/1.3.0: resolution: {integrity: sha512-11Ndz7Nv+mvAC1j0ktTa7fAb0vLyGGX+rMHNBYQviQDGU0Hw7lhctJANqbPhu9nV9/izT/IntTgZ7Im/9LJs9g==} engines: {'0': node >=0.6.0} @@ -7641,13 +7484,6 @@ packages: dependencies: to-regex-range: 5.0.1 - /filtered-vector/1.2.5: - resolution: 
{integrity: sha512-5Vu6wdtQJ1O2nRmz39dIr9m3hEDq1skYby5k1cJQdNWK4dMgvYcUEiA/9j7NcKfNZ5LGxn8w2LSLiigyH7pTAw==} - dependencies: - binary-search-bounds: 2.0.5 - cubic-hermite: 1.0.0 - dev: false - /finalhandler/1.2.0: resolution: {integrity: sha512-5uXcUVftlQMFnWC9qu/svkWv3GTd2PfUhK/3PLkYNAe7FbqJMt3515HaxE6eRL74GdsriiwujiawdaB1BpEISg==} engines: {node: '>= 0.8'} @@ -7990,10 +7826,6 @@ packages: /functions-have-names/1.2.3: resolution: {integrity: sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ==} - /gamma/0.1.0: - resolution: {integrity: sha512-IgHc/jnzNTA2KjXmRSx/CVd1ONp7HTAV81SLI+n3G6PyyHkakkE+2d3hteJYFm7aoe01NEl4m7ziUAsoWCc5AA==} - dev: false - /gensync/1.0.0-beta.2: resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} engines: {node: '>=6.9.0'} @@ -8077,120 +7909,6 @@ packages: dependencies: assert-plus: 1.0.0 - /gl-axes3d/1.5.3: - resolution: {integrity: sha512-KRYbguKQcDQ6PcB9g1pgqB8Ly4TY1DQODpPKiDTasyWJ8PxQk0t2Q7XoQQijNqvsguITCpVVCzNb5GVtIWiVlQ==} - dependencies: - bit-twiddle: 1.0.2 - dup: 1.0.0 - extract-frustum-planes: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-state: 1.0.0 - gl-vao: 1.3.0 - gl-vec4: 1.0.1 - glslify: 7.1.1 - robust-orientation: 1.2.1 - split-polygon: 1.0.0 - vectorize-text: 3.2.2 - dev: false - - /gl-buffer/2.1.2: - resolution: {integrity: sha512-uVvLxxhEbQGl43xtDeKu75ApnrGyNHoPmOcvvuJNyP04HkK0/sX5Dll6OFffQiwSV4j0nlAZsgznvO3CPT3dFg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-cone3d/1.5.2: - resolution: {integrity: sha512-1JNeHH4sUtUmDA4ZK7Om8/kShwb8IZVAsnxaaB7IPRJsNGciLj1sTpODrJGeMl41RNkex5kXD2SQFrzyEAR2Rw==} - dependencies: - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - gl-vec3: 1.1.3 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-constants/1.0.0: - resolution: {integrity: sha512-3DNyoAUdb1c+o7jNk5Nm7eh6RSQFi9ZmMQIQb2xxsO27rUopE+IUhoh4xlUvZYBn1YPgUC8BlCnrVjXq/d2dQA==} - dev: false - - /gl-contour2d/1.1.7: - resolution: {integrity: sha512-GdebvJ9DtT3pJDpoE+eU2q+Wo9S3MijPpPz5arZbhK85w2bARmpFpVfPaDlZqWkB644W3BlH8TVyvAo1KE4Bhw==} - dependencies: - binary-search-bounds: 2.0.5 - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - iota-array: 1.0.0 - ndarray: 1.0.19 - surface-nets: 1.0.2 - dev: false - - /gl-error3d/1.0.16: - resolution: {integrity: sha512-TGJewnKSp7ZnqGgG3XCF9ldrDbxZrO+OWlx6oIet4OdOM//n8xJ5isArnIV/sdPJnFbhfoLxWrW9f5fxHFRQ1A==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - dev: false - - /gl-fbo/2.0.5: - resolution: {integrity: sha512-tDq6zQSQzvvK2QwPV7ln7cf3rs0jV1rQXqKOEuB145LdN+xhADPBtXHDJ3Ftk80RAJimJU0AaQBgP/X6yYGNhQ==} - dependencies: - gl-texture2d: 2.1.0 - dev: false - - /gl-format-compiler-error/1.0.3: - resolution: {integrity: sha512-FtQaBYlsM/rnz7YhLkxG9dLcNDB+ExErIsFV2DXl0nk+YgIZ2i0jMob4BrhT9dNa179zFb0gZMWpNAokytK+Ug==} - dependencies: - add-line-numbers: 1.0.1 - gl-constants: 1.0.0 - glsl-shader-name: 1.0.0 - sprintf-js: 1.1.2 - dev: false - - /gl-heatmap2d/1.1.1: - resolution: {integrity: sha512-6Vo1fPIB1vQFWBA/MR6JAA16XuQuhwvZRbSjYEq++m4QV33iqjGS2HcVIRfJGX+fomd5eiz6bwkVZcKm69zQPw==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - 
iota-array: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - - /gl-line3d/1.2.1: - resolution: {integrity: sha512-eeb0+RI2ZBRqMYJK85SgsRiJK7c4aiOjcnirxv0830A3jmOc99snY3AbPcV8KvKmW0Yaf3KA4e+qNCbHiTOTnA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - ndarray: 1.0.19 - dev: false - - /gl-mat3/1.0.0: - resolution: {integrity: sha512-obeEq9y7xaDoVkwMGJNL1upwpYlPJiXJFhREaNytMqUdfHKHNna9HvImmLV8F8Ys6QOYwPPddptZNoiiec/XOg==} - dev: false - /gl-mat4/1.2.0: resolution: {integrity: sha512-sT5C0pwB1/e9G9AvAoLsoaJtbMGjfd/jfxo8jMCKqYYEnjZuFvqV5rehqar0538EmssjdDeiEWnKyBSTw7quoA==} dev: false @@ -8199,169 +7917,6 @@ packages: resolution: {integrity: sha512-wcCp8vu8FT22BnvKVPjXa/ICBWRq/zjFfdofZy1WSpQZpphblv12/bOQLBC1rMM7SGOFS9ltVmKOHil5+Ml7gA==} dev: false - /gl-mesh3d/2.3.1: - resolution: {integrity: sha512-pXECamyGgu4/9HeAQSE5OEUuLBGS1aq9V4BCsTcxsND4fNLaajEkYKUz/WY2QSYElqKdsMBVsldGiKRKwlybqA==} - dependencies: - barycentric: 1.0.1 - colormap: 2.3.2 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - ndarray: 1.0.19 - normals: 1.1.0 - polytope-closest-point: 1.0.0 - simplicial-complex-contour: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - - /gl-plot2d/1.4.5: - resolution: {integrity: sha512-6GmCN10SWtV+qHFQ1gjdnVubeHFVsm6P4zmo0HrPIl9TcdePCUHDlBKWAuE6XtFhiMKMj7R8rApOX8O8uXUYog==} - dependencies: - binary-search-bounds: 2.0.5 - gl-buffer: 2.1.2 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - glsl-inverse: 1.0.0 - glslify: 7.1.1 - text-cache: 4.2.2 - dev: false - - /gl-plot3d/2.4.7: - resolution: {integrity: sha512-mLDVWrl4Dj0O0druWyHUK5l7cBQrRIJRn2oROEgrRuOgbbrLAzsREKefwMO0bA0YqkiZMFMnV5VvPA9j57X5Xg==} - dependencies: - 3d-view: 2.0.1 - a-big-triangle: 1.0.3 - gl-axes3d: 1.5.3 - gl-fbo: 2.0.5 - gl-mat4: 1.2.0 - gl-select-static: 2.0.7 - gl-shader: 4.3.1 - gl-spikes3d: 1.0.10 - glslify: 7.1.1 - has-passive-events: 1.0.0 - is-mobile: 2.2.2 - mouse-change: 1.4.0 - mouse-event-offset: 3.0.2 - mouse-wheel: 1.2.0 - ndarray: 1.0.19 - right-now: 1.0.0 - dev: false - - /gl-pointcloud2d/1.0.3: - resolution: {integrity: sha512-OS2e1irvJXVRpg/GziXj10xrFJm9kkRfFoB6BLUvkjCQV7ZRNNcs2CD+YSK1r0gvMwTg2T3lfLM3UPwNtz+4Xw==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - typedarray-pool: 1.2.0 - dev: false - - /gl-quat/1.0.0: - resolution: {integrity: sha512-Pv9yvjJgQN85EbE79S+DF50ujxDkyjfYHIyXJcCRiimU1UxMY7vEHbVkj0IWLFaDndhfZT9vVOyfdMobLlrJsQ==} - dependencies: - gl-mat3: 1.0.0 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - dev: false - - /gl-scatter3d/1.2.3: - resolution: {integrity: sha512-nXqPlT1w5Qt51dTksj+DUqrZqwWAEWg0PocsKcoDnVNv0X8sGA+LBZ0Y+zrA+KNXUL0PPCX9WR9cF2uJAZl1Sw==} - dependencies: - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glslify: 7.1.1 - is-string-blank: 1.0.1 - typedarray-pool: 1.2.0 - vectorize-text: 3.2.2 - dev: false - - /gl-select-box/1.0.4: - resolution: {integrity: sha512-mKsCnglraSKyBbQiGq0Ila0WF+m6Tr+EWT2yfaMn/Sh9aMHq5Wt0F/l6Cf/Ed3CdERq5jHWAY5yxLviZteYu2w==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - glslify: 7.1.1 - dev: false - - /gl-select-static/2.0.7: - resolution: {integrity: sha512-OvpYprd+ngl3liEatBTdXhSyNBjwvjMSvV2rN0KHpTU+BTi4viEETXNZXFgGXY37qARs0L28ybk3UQEW6C5Nnw==} - dependencies: - bit-twiddle: 1.0.2 - gl-fbo: 2.0.5 - ndarray: 1.0.19 - typedarray-pool: 1.2.0 - dev: 
false - - /gl-shader/4.3.1: - resolution: {integrity: sha512-xLoN6XtRLlg97SEqtuzfKc+pVWpVkQ3YjDI1kuCale8tF7+zMhiKlMfmG4IMQPMdKJZQbIc/Ny8ZusEpfh5U+w==} - dependencies: - gl-format-compiler-error: 1.0.3 - weakmap-shim: 1.1.1 - dev: false - - /gl-spikes2d/1.0.2: - resolution: {integrity: sha512-QVeOZsi9nQuJJl7NB3132CCv5KA10BWxAY2QgJNsKqbLsG53B/TrGJpjIAohnJftdZ4fT6b3ZojWgeaXk8bOOA==} - dev: false - - /gl-spikes3d/1.0.10: - resolution: {integrity: sha512-lT3xroowOFxMvlhT5Mof76B2TE02l5zt/NIWljhczV2FFHgIVhA4jMrd5dIv1so1RXMBDJIKu0uJI3QKliDVLg==} - dependencies: - gl-buffer: 2.1.2 - gl-shader: 4.3.1 - gl-vao: 1.3.0 - glslify: 7.1.1 - dev: false - - /gl-state/1.0.0: - resolution: {integrity: sha512-Od836PpgCuTC0W7uHYnEEPRdQPL1FakWlznz3hRvlO6tD5sdLfBKX9qNRGy1DjfMCDTudhyYWxiWjhql1B8N4Q==} - dependencies: - uniq: 1.0.1 - dev: false - - /gl-streamtube3d/1.4.1: - resolution: {integrity: sha512-rH02v00kgwgdpkXVo7KsSoPp38bIAYR9TE1iONjcQ4cQAlDhrGRauqT/P5sUaOIzs17A2DxWGcXM+EpNQs9pUA==} - dependencies: - gl-cone3d: 1.5.2 - gl-vec3: 1.1.3 - gl-vec4: 1.0.1 - glsl-inverse: 1.0.0 - glsl-out-of-range: 1.0.4 - glsl-specular-cook-torrance: 2.0.1 - glslify: 7.1.1 - dev: false - - /gl-surface3d/1.6.0: - resolution: {integrity: sha512-x15+u4712ysnB85G55RLJEml6mOB4VaDn0VTlXCc9JcjRl5Es10Tk7lhGGyiPtkCfHwvhnkxzYA1/rHHYN7Y0A==} - dependencies: - binary-search-bounds: 2.0.5 - bit-twiddle: 1.0.2 - colormap: 2.3.2 - dup: 1.0.0 - gl-buffer: 2.1.2 - gl-mat4: 1.2.0 - gl-shader: 4.3.1 - gl-texture2d: 2.1.0 - gl-vao: 1.3.0 - glsl-out-of-range: 1.0.4 - glsl-specular-beckmann: 1.1.2 - glslify: 7.1.1 - ndarray: 1.0.19 - ndarray-gradient: 1.0.1 - ndarray-ops: 1.2.2 - ndarray-pack: 1.2.1 - ndarray-scratch: 1.2.0 - surface-nets: 1.0.2 - typedarray-pool: 1.2.0 - dev: false - /gl-text/1.3.1: resolution: {integrity: sha512-/f5gcEMiZd+UTBJLTl3D+CkCB/0UFGTx3nflH8ZmyWcLkZhsZ1+Xx5YYkw2rgWAzgPeE35xCqBuHSoMKQVsR+w==} dependencies: @@ -8384,14 +7939,6 @@ packages: typedarray-pool: 1.2.0 dev: false - /gl-texture2d/2.1.0: - resolution: {integrity: sha512-W0tzEjtlGSsCKq5FFwFVhH+fONFUTUeqM4HhA/BleygKaX39IwNTVOiqkwfu9szQZ4dQEq8ZDl7w1ud/eKLaZA==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - /gl-util/3.1.3: resolution: {integrity: sha512-dvRTggw5MSkJnCbh74jZzSoTOGnVYK+Bt+Ckqm39CVcl6+zSsxqWk4lr5NKhkqXHL6qvZAU9h17ZF8mIskY9mA==} dependencies: @@ -8404,18 +7951,6 @@ packages: weak-map: 1.0.8 dev: false - /gl-vao/1.3.0: - resolution: {integrity: sha512-stSOZ+n0fnAxgDfipwKK/73AwzCNL+AFEc/v2Xm76nyFnUZGmQtD2FEC3lt1icoOHAzMgHBAjCue7dBIDeOTcw==} - dev: false - - /gl-vec3/1.1.3: - resolution: {integrity: sha512-jduKUqT0SGH02l8Yl+mV1yVsDfYgQAJyXGxkJQGyxPLHRiW25DwVIRPt6uvhrEMHftJfqhqKthRcyZqNEl9Xdw==} - dev: false - - /gl-vec4/1.0.1: - resolution: {integrity: sha512-/gx5zzIy75JXzke4yuwcbvK+COWf8UJbVCUPvhfsYVw1GVey4Eextk/0H0ctXnOICruNK7+GS4ILQzEQcHcPEg==} - dev: false - /glob-parent/3.1.0: resolution: {integrity: sha512-E8Ak/2+dZY6fnzlR7+ueWvhsH1SjHr4jjss4YS/h4py44jY9MhK/VFdaZJAWDz6BbL21KeteKxFSFpq8OS5gVA==} dependencies: @@ -8538,14 +8073,6 @@ packages: glsl-tokenizer: 2.1.5 dev: false - /glsl-inverse/1.0.0: - resolution: {integrity: sha512-+BsseNlgqzd4IFX1dMqg+S0XuIXzH0acvTtW7svwhJESM1jb2BZFwdO+tOWdCXD5Zse6b9bOmzp5sCNA7GQ2QA==} - dev: false - - /glsl-out-of-range/1.0.4: - resolution: {integrity: sha512-fCcDu2LCQ39VBvfe1FbhuazXEf0CqMZI9OYXrYlL6uUARG48CTAbL04+tZBtVM0zo1Ljx4OLu2AxNquq++lxWQ==} - dev: false - /glsl-resolve/0.0.1: resolution: {integrity: 
sha512-xxFNsfnhZTK9NBhzJjSBGX6IOqYpvBHxxmo+4vapiljyGNCY0Bekzn0firQkQrazK59c1hYxMDxYS8MDlhw4gA==} dependencies: @@ -8553,23 +8080,6 @@ packages: xtend: 2.2.0 dev: false - /glsl-shader-name/1.0.0: - resolution: {integrity: sha512-OtHon0dPCbJD+IrVA1vw9QDlp2cS/f9z8X/0y+W7Qy1oZ3U1iFAQUEco2v30V0SAlVLDG5rEfhjEfc3DKdGbFQ==} - dependencies: - atob-lite: 1.0.0 - glsl-tokenizer: 2.1.5 - dev: false - - /glsl-specular-beckmann/1.1.2: - resolution: {integrity: sha512-INvd7szO1twNPLGwE0Kf2xXIEy5wpOPl/LYoiw3+3nbAe6Rfn5rjdK9xvfnwoWksTCs3RejuLeAiZkLTkdFtwg==} - dev: false - - /glsl-specular-cook-torrance/2.0.1: - resolution: {integrity: sha512-bFtTfbgLXIbto/U6gM7h0IxoPMU+5zpMK5HoAaA2LnPuGk3JSzKAnsoyh5QGTT8ioIEQrjk6jcQNrgujPsP7rw==} - dependencies: - glsl-specular-beckmann: 1.1.2 - dev: false - /glsl-token-assignments/2.0.2: resolution: {integrity: sha512-OwXrxixCyHzzA0U2g4btSNAyB2Dx8XrztY5aVUCjRSh4/D0WoJn8Qdps7Xub3sz6zE73W3szLrmWtQ7QMpeHEQ==} dev: false @@ -9090,12 +8600,6 @@ packages: dev: false optional: true - /image-size/0.7.5: - resolution: {integrity: sha512-Hiyv+mXHfFEP7LzUL/llg9RwFxxY+o9N3JVLIeG5E7iFIFAalxvRU9UZthBdYDEVnzHMgjnKJPPpay5BWf1g9g==} - engines: {node: '>=6.9.0'} - hasBin: true - dev: false - /immer/1.10.0: resolution: {integrity: sha512-O3sR1/opvCDGLEVcvrGTMtLac8GJ5IwZC4puPrLuRj3l7ICKvkmA0vGuU9OW8mV9WIBRnaxp5GJh9IEAaNOoYg==} dev: false @@ -9157,13 +8661,6 @@ packages: resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} engines: {node: '>=0.8.19'} - /incremental-convex-hull/1.0.1: - resolution: {integrity: sha512-mKRJDXtzo1R9LxCuB1TdwZXHaPaIEldoGPsXy2jrJc/kufyqp8y/VAQQxThSxM2aroLoh6uObexPk1ASJ7FB7Q==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 1.0.0 - dev: false - /indent-string/3.2.0: resolution: {integrity: sha512-BYqTHXTGUIvg7t1r4sJNKcbDZkL92nkXA8YtRpbjFHRHGDL/NtUeiBJMeE60kIFN/Mg8ESaWQvftaYMGJzQZCQ==} engines: {node: '>=4'} @@ -9262,26 +8759,12 @@ packages: engines: {node: '>= 0.10'} dev: true - /interval-tree-1d/1.0.4: - resolution: {integrity: sha512-wY8QJH+6wNI0uh4pDQzMvl+478Qh7Rl4qLmqiluxALlNvl+I+o5x38Pw3/z7mDPTPS1dQalZJXsmbvxx5gclhQ==} - dependencies: - binary-search-bounds: 2.0.5 - dev: false - /invariant/2.2.4: resolution: {integrity: sha512-phJfQVBuaJM5raOpJjSfkiD6BpbCE4Ns//LaXl6wGYtUBY83nWS6Rf9tXm2e8VaK60JEjYldbPif/A2B1C2gNA==} dependencies: loose-envify: 1.4.0 dev: false - /invert-permutation/1.0.0: - resolution: {integrity: sha512-8f473/KSrnvyBd7Khr4PC5wPkAOehwkGc+AH5Q7D+U/fE+cdDob2FJ3naXAs4mspR9JIaEwbDI3me8H0KlVzSQ==} - dev: false - - /iota-array/1.0.0: - resolution: {integrity: sha512-pZ2xT+LOHckCatGQ3DcG/a+QuEqvoxqkiL7tvE8nn3uuu+f6i1TtpB5/FtWFbxUuVr5PZCx8KskuGatbJDXOWA==} - dev: false - /ip-regex/2.1.0: resolution: {integrity: sha512-58yWmlHpp7VYfcdTwMTvwMmqx/Elfxjd9RXTDyMsbL7lLWmhMylLEqiYVLKuLzOZqVgiWXD9MfR62Vv89VRxkw==} engines: {node: '>=4'} @@ -9571,6 +9054,10 @@ packages: resolution: {integrity: sha512-wW/SXnYJkTjs++tVK5b6kVITZpAZPtUrt9SF80vvxGiF/Oywal+COk1jlRkiVq15RFNEQKQY31TkV24/1T5cVg==} dev: false + /is-mobile/4.0.0: + resolution: {integrity: sha512-mlcHZA84t1qLSuWkt2v0I2l61PYdyQDt4aG1mLIXF5FDMm4+haBCxCPYSr/uwqQNRk1MiTizn0ypEuRAOLRAew==} + dev: false + /is-negated-glob/1.0.0: resolution: {integrity: sha512-czXVVn/QEmgvej1f50BZ648vUI+em0xqMq2Sn+QncCLN4zj1UAxlT+kw/6ggQTOaZPd1HqKQGEqbpQVtJucWug==} engines: {node: '>=0.10.0'} @@ -10650,10 +10137,6 @@ packages: deprecated: use String.prototype.padStart() dev: false - /lerp/1.0.3: - resolution: {integrity: 
sha512-70Rh4rCkJDvwWiTsyZ1HmJGvnyfFah4m6iTux29XmasRiZPDBpT9Cfa4ai73+uLZxnlKruUS62jj2lb11wURiA==} - dev: false - /less-loader/5.0.0_less@3.13.1: resolution: {integrity: sha512-bquCU89mO/yWLaUq0Clk7qCsKhsF/TZpJUzETRvJa9KSVEL9SO3ovCvdEHISBhrC81OwC8QSVX7E0bzElZj9cg==} engines: {node: '>= 4.8.0'} @@ -10820,6 +10303,10 @@ packages: resolution: {integrity: sha512-t7j+NzmgnQzTAYXcsHYLgimltOV1MXHtlOWf6GjL9Kj8GK5FInw5JotxvbOs+IvV1/Dzo04/fCGfLVs7aXb4Ag==} dev: false + /lodash.merge/4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: false + /lodash.sortby/4.7.0: resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==} dev: false @@ -11010,49 +10497,11 @@ packages: vt-pbf: 3.1.3 dev: false - /marching-simplex-table/1.0.0: - resolution: {integrity: sha512-PexXXVF4f5Bux3vGCNlRRBqF/GyTerNo77PbBz8g/MFFXv212b48IGVglj/VfaYBRY6vlFQffa9dFbCCN0+7LA==} - dependencies: - convex-hull: 1.0.3 - dev: false - - /mat4-decompose/1.0.4: - resolution: {integrity: sha512-M3x6GXrzRTt5Ok4/bcHFc869Pe8F3uWaSp3xkUpi+uaTRulPXIZ1GWD13Z3A8WK2bxTrcvX21mjp05gUy/Dwbw==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - - /mat4-interpolate/1.0.4: - resolution: {integrity: sha512-+ulnoc6GUHq8eGZGbLyhQU61tx2oeNAFilV/xzCCzLV+F3nDk8jqERUqRmx8eNMMMvrdvoRSw0JXmnisfVPY9A==} - dependencies: - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-decompose: 1.0.4 - mat4-recompose: 1.0.4 - quat-slerp: 1.0.1 - dev: false - - /mat4-recompose/1.0.4: - resolution: {integrity: sha512-s1P2Yl4LQxq8dN0CgJE+mCO8y3IX/SmauSZ+H0zJsE1UKlgJ9loInfPC/OUxn2MzUW9bfBZf0Wcc2QKA3/e6FQ==} - dependencies: - gl-mat4: 1.2.0 - dev: false - /math-log2/1.0.1: resolution: {integrity: sha512-9W0yGtkaMAkf74XGYVy4Dqw3YUMnTNB2eeiw9aQbUl4A3KmuCEHTt2DgAB07ENzOYAjsYSAYufkAq0Zd+jU7zA==} engines: {node: '>=0.10.0'} dev: false - /matrix-camera-controller/2.1.4: - resolution: {integrity: sha512-zsPGPONclrKSImNpqqKDTcqFpWLAIwMXEJtCde4IFPOw1dA9udzFg4HOFytOTosOFanchrx7+Hqq6glLATIxBA==} - dependencies: - binary-search-bounds: 2.0.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - mat4-interpolate: 1.0.4 - dev: false - /md5.js/1.3.5: resolution: {integrity: sha512-xitP+WxNPcTTOgnTJcrhM0xvdPepipPSf3I8EIpGKeFLjt3PlJLIDG3u8EX53ZIubkb+5U2+3rELYpEhHhzdkg==} dependencies: @@ -11364,12 +10813,6 @@ packages: resolution: {integrity: sha512-5LC9SOxjSc2HF6vO2CyuTDNivEdoz2IvyJJGj6X8DJ0eFyfszE0QiEd+iXmBvUP3WHxSjFH/vIsA0EN00cgr8w==} dev: false - /monotone-convex-hull-2d/1.0.1: - resolution: {integrity: sha512-ixQ3qdXTVHvR7eAoOjKY8kGxl9YjOFtzi7qOjwmFFPfBqZHVOjUFOBy/Dk9dusamRSPJe9ggyfSypRbs0Bl8BA==} - dependencies: - robust-orientation: 1.2.1 - dev: false - /morgan/1.10.0: resolution: {integrity: sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==} engines: {node: '>= 0.8.0'} @@ -11506,6 +10949,10 @@ packages: - supports-color dev: false + /native-promise-only/0.8.1: + resolution: {integrity: sha512-zkVhZUA3y8mbz652WrL5x0fB0ehrBkulWT3TomAQ9iDtyXZvzKeEA6GPxAItBYeNYl5yngKRX612qHOhvMkDeg==} + dev: false + /native-request/1.1.0: resolution: {integrity: sha512-uZ5rQaeRn15XmpgE0xoPL8YWqcX90VtCFglYwAgkvKM5e8fog+vePLAhHxuuv/gRkrQxIeh5U3q9sMNUrENqWw==} requiresBuild: true @@ -11515,55 +10962,16 @@ packages: /natural-compare/1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - /ndarray-extract-contour/1.0.1: - resolution: {integrity: 
sha512-iDngNoFRqrqbXGLP8BzyGrybw/Jnkkn7jphzc3ZFfO7dfmpL1Ph74/6xCi3xSvJFyVW90XpMnd766jTaRPsTCg==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray-gradient/1.0.1: - resolution: {integrity: sha512-+xONVi7xxTCGL6KOb11Yyoe0tPNqAUKF39CvFoRjL5pdOmPd2G2pckK9lD5bpLF3q45LLnYNyiUSJSdNmQ2MTg==} - dependencies: - cwise-compiler: 1.1.3 - dup: 1.0.0 - dev: false - - /ndarray-linear-interpolate/1.0.0: - resolution: {integrity: sha512-UN0f4+6XWsQzJ2pP5gVp+kKn5tJed6mA3K/L50uO619+7LKrjcSNdcerhpqxYaSkbxNJuEN76N05yBBJySnZDw==} - dev: false - - /ndarray-ops/1.2.2: - resolution: {integrity: sha512-BppWAFRjMYF7N/r6Ie51q6D4fs0iiGmeXIACKY66fLpnwIui3Wc3CXiD/30mgLbDjPpSLrsqcp3Z62+IcHZsDw==} - dependencies: - cwise-compiler: 1.1.3 - dev: false - - /ndarray-pack/1.2.1: - resolution: {integrity: sha512-51cECUJMT0rUZNQa09EoKsnFeDL4x2dHRT0VR5U2H5ZgEcm95ZDWcMA5JShroXjHOejmAD/fg8+H+OvUnVXz2g==} - dependencies: - cwise-compiler: 1.1.3 - ndarray: 1.0.19 - dev: false - - /ndarray-scratch/1.2.0: - resolution: {integrity: sha512-a4pASwB1jQyJcKLYrwrladVfDZDUGc78qLJZbHyb1Q4rhte0URhzc6ALQpBcauwgov0sXLwZz3vYH5jKAhSMIg==} - dependencies: - ndarray: 1.0.19 - ndarray-ops: 1.2.2 - typedarray-pool: 1.2.0 - dev: false - - /ndarray-sort/1.0.1: - resolution: {integrity: sha512-Gpyis5NvEPOQVadDOG+Dx8bhYCkaxn5IlA4Ig/jBJIlnW1caDiPneQLzT/+AIMeHEmqlGZfdqO/I1TXJS2neAw==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /ndarray/1.0.19: - resolution: {integrity: sha512-B4JHA4vdyZU30ELBw3g7/p9bZupyew5a7tX1Y/gGeF2hafrPaQZhgrGQfsvgfYbgdFZjYwuEcnaobeM/WMW+HQ==} + /needle/2.9.1: + resolution: {integrity: sha512-6R9fqJ5Zcmf+uYaFgdIHmLwNldn5HbK8L5ybn7Uz+ylX/rnOsSp1AHcvQSrCaFN+qNM1wpymHqD7mVasEOlHGQ==} + engines: {node: '>= 4.4.x'} + hasBin: true dependencies: - iota-array: 1.0.0 - is-buffer: 1.1.6 + debug: 3.2.7 + iconv-lite: 0.4.24 + sax: 1.2.4 + transitivePeerDependencies: + - supports-color dev: false /negotiator/0.6.3: @@ -11578,12 +10986,6 @@ packages: resolution: {integrity: sha512-CXdUiJembsNjuToQvxayPZF9Vqht7hewsvy2sOWafLvi2awflj9mOC6bHIg50orX8IJvWKY9wYQ/zB2kogPslQ==} dev: false - /nextafter/1.0.0: - resolution: {integrity: sha512-7PO+A89Tll2rSEfyrjtqO0MaI37+nnxBdnQcPypfbEYYuGaJxWGCqaOwQX4a3GHNTS08l1kazuiLEWZniZjMUQ==} - dependencies: - double-bits: 1.1.1 - dev: false - /nice-try/1.0.5: resolution: {integrity: sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ==} @@ -11710,10 +11112,6 @@ packages: engines: {node: '>=8'} dev: true - /normals/1.1.0: - resolution: {integrity: sha512-XWeliW48BLvbVJ+cjQAOE+tA0m1M7Yi1iTPphAS9tBmW1A/c/cOVnEUecPCCMH5lEAihAcG6IRle56ls9k3xug==} - dev: false - /npm-run-all/4.1.5: resolution: {integrity: sha512-Oo82gJDAVcaMdi3nuoKFavkIHBRVqQ1qvMb+9LHk/cF4P6B2m8aP04hGf7oL6wZ9BuGwX1onlLhpuoofSyoQDQ==} engines: {node: '>= 4'} @@ -11759,10 +11157,6 @@ packages: is-finite: 1.1.0 dev: false - /numeric/1.2.6: - resolution: {integrity: sha512-avBiDAP8siMa7AfJgYyuxw1oyII4z2sswS23+O+ZfV28KrtNzy0wxUFwi4f3RyM4eeeXNs1CThxR7pb5QQcMiw==} - dev: false - /nwsapi/2.2.7: resolution: {integrity: sha512-ub5E4+FBPKwAZx0UwIQOjYWGHTEq5sPqHQNRN8Z9e4A7u3Tj1weLJsL59yH9vmvqEtBHaOmT6cYQKIZOxp35FQ==} dev: false @@ -11975,13 +11369,6 @@ packages: type-check: 0.3.2 word-wrap: 1.2.5 - /orbit-camera-controller/4.0.0: - resolution: {integrity: sha512-/XTmpr6FUT6MuKPBGN2nv9cS8jhhVs8do71VagBQS5p4rxM04MhqSnI/Uu+gVNN5s6KPcS73o1dHzjuDThEJUA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - dev: false - /os-browserify/0.3.0: resolution: {integrity: 
sha512-gjcpUc3clBf9+210TRaDWbf+rZZZEshZ+DlXMRCeAjp0xhTrnQsKHypIy1J3d5hKdUzj69t708EHtU8P6bUn0A==} @@ -12082,13 +11469,6 @@ packages: semver: 6.3.1 dev: true - /pad-left/1.0.2: - resolution: {integrity: sha512-saxSV1EYAytuZDtQYEwi0DPzooG6aN18xyHrnJtzwjVwmMauzkEecd7hynVJGolNGk1Pl9tltmZqfze4TZTCxg==} - engines: {node: '>=0.10.0'} - dependencies: - repeat-string: 1.6.1 - dev: false - /pako/1.0.11: resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} @@ -12288,19 +11668,6 @@ packages: /performance-now/2.1.0: resolution: {integrity: sha512-7EAHlyLHI56VEIdK57uwHdHKIaAGbnXPiw0yWbarQZOKaKpvUIgW0jWRVLiatnM+XXlSwsanIBH/hzGMJulMow==} - /permutation-parity/1.0.0: - resolution: {integrity: sha512-mRaEvnnWolbZuErWD08StRUZP9YOWG3cURP5nYpRg1D2PENzPXCUrPv8/bOk0tfln0hISLZjOdOcQCbsVpL2nQ==} - dependencies: - typedarray-pool: 1.2.0 - dev: false - - /permutation-rank/1.0.0: - resolution: {integrity: sha512-kmXwlQcd4JlV8g61jz0xDyroFNlJ/mP+KbSBllMuQD7FvaQInRnnAStElcppkUXd8qVFLvemy6msUmBn7sDzHg==} - dependencies: - invert-permutation: 1.0.0 - typedarray-pool: 1.2.0 - dev: false - /pick-by-alias/1.2.0: resolution: {integrity: sha512-ESj2+eBxhGrcA1azgHs7lARG5+5iLakc/6nlfbpjcLl00HuuUOIuORhYXN4D1HfvMSKuVtFQjAlnwi1JHEeDIw==} dev: false @@ -12380,94 +11747,56 @@ packages: find-up: 3.0.0 dev: false - /planar-dual/1.0.2: - resolution: {integrity: sha512-jfQCbX1kXu53+enC+BPQlfoZI1u5m8IUhFVtFG+9tUj84wnuaYNheR69avYWCNXWnUCkwUajmYMqX9M2Ruh4ug==} - dependencies: - compare-angle: 1.0.1 - dup: 1.0.0 - dev: false - - /planar-graph-to-polyline/1.0.6: - resolution: {integrity: sha512-h8a9kdAjo7mRhC0X6HZ42xzFp7vKDZA+Hygyhsq/08Qi4vVAQYJaLLYLvKUUzRbVKvdYqq0reXHyV0EygyEBHA==} - dependencies: - edges-to-adjacency-list: 1.0.0 - planar-dual: 1.0.2 - point-in-big-polygon: 2.0.1 - robust-orientation: 1.2.1 - robust-sum: 1.0.0 - two-product: 1.0.2 - uniq: 1.0.1 - dev: false - /please-upgrade-node/3.2.0: resolution: {integrity: sha512-gQR3WpIgNIKwBMVLkpMUeR3e1/E1y42bqDQZfql+kDeXd8COYfM8PQA4X6y7a8u9Ua9FHmsrrmirW2vHs45hWg==} dependencies: semver-compare: 1.0.0 dev: true - /plotly.js/1.58.5: - resolution: {integrity: sha512-ChTlnFXB4tB0CzcG1mqgUKYnrJsZ8REDGox8BHAa/ltsd48MOAhOmFgjyDxwsXyjjgwOI296GeYDft8g4ftLHQ==} + /plotly.js/2.25.2: + resolution: {integrity: sha512-Pf6dPYGl21W7A3FTgLQ52fpgvrqGhCPDT3+612bxwg4QXlvxhnoFwvuhT1BRW/l2nbYGpRoUH79K54yf2vCMVQ==} dependencies: + '@plotly/d3': 3.8.1 '@plotly/d3-sankey': 0.7.2 '@plotly/d3-sankey-circular': 0.33.1 - '@plotly/point-cluster': 3.1.9 '@turf/area': 6.5.0 '@turf/bbox': 6.5.0 '@turf/centroid': 6.5.0 - alpha-shape: 1.0.0 canvas-fit: 1.5.0 color-alpha: 1.0.4 color-normalize: 1.5.0 color-parse: 1.3.8 color-rgba: 2.1.1 - convex-hull: 1.0.3 country-regex: 1.1.0 - d3: 3.5.17 d3-force: 1.2.1 + d3-format: 1.4.5 + d3-geo: 1.12.1 + d3-geo-projection: 2.9.0 d3-hierarchy: 1.1.9 - d3-interpolate: 1.4.0 + d3-interpolate: 3.0.1 + d3-time: 1.1.0 d3-time-format: 2.3.0 - delaunay-triangulate: 1.1.6 - es6-promise: 4.2.8 fast-isnumeric: 1.1.4 - gl-cone3d: 1.5.2 - gl-contour2d: 1.1.7 - gl-error3d: 1.0.16 - gl-heatmap2d: 1.1.1 - gl-line3d: 1.2.1 gl-mat4: 1.2.0 - gl-mesh3d: 2.3.1 - gl-plot2d: 1.4.5 - gl-plot3d: 2.4.7 - gl-pointcloud2d: 1.0.3 - gl-scatter3d: 1.2.3 - gl-select-box: 1.0.4 - gl-spikes2d: 1.0.2 - gl-streamtube3d: 1.4.1 - gl-surface3d: 1.6.0 gl-text: 1.3.1 glslify: 7.1.1 has-hover: 1.0.1 has-passive-events: 1.0.0 - image-size: 0.7.5 - is-mobile: 2.2.2 + is-mobile: 4.0.0 mapbox-gl: 1.10.1 - matrix-camera-controller: 2.1.4 mouse-change: 1.4.0 
mouse-event-offset: 3.0.2 mouse-wheel: 1.2.0 - ndarray: 1.0.19 - ndarray-linear-interpolate: 1.0.0 + native-promise-only: 0.8.1 parse-svg-path: 0.1.2 + point-in-polygon: 1.1.0 polybooljs: 1.2.0 - regl: 1.7.0 + probe-image-size: 7.2.3 + regl: /@plotly/regl/2.1.2 regl-error2d: 2.0.12 regl-line2d: 3.1.2 regl-scatter2d: 3.2.9 regl-splom: 1.0.14 - right-now: 1.0.0 - robust-orientation: 1.2.1 - sane-topojson: 4.0.0 strongly-connected-components: 1.0.1 superscript-text: 1.0.0 svg-path-sdf: 1.1.3 @@ -12476,6 +11805,8 @@ packages: topojson-client: 3.1.0 webgl-context: 2.2.0 world-calendars: 1.0.3 + transitivePeerDependencies: + - supports-color dev: false /plur/3.1.1: @@ -12503,25 +11834,14 @@ packages: - typescript dev: false - /point-in-big-polygon/2.0.1: - resolution: {integrity: sha512-DtrN8pa2VfMlvmWlCcypTFeBE4+OYz1ojDNJLKCWa4doiVAD6PRBbxFYAT71tsp5oKaRXT5sxEiHCAQKb1zr2Q==} - dependencies: - binary-search-bounds: 2.0.5 - interval-tree-1d: 1.0.4 - robust-orientation: 1.2.1 - slab-decomposition: 1.0.3 + /point-in-polygon/1.1.0: + resolution: {integrity: sha512-3ojrFwjnnw8Q9242TzgXuTD+eKiutbzyslcq1ydfu82Db2y+Ogbmyrkpv0Hgj31qwT3lbS9+QAAO/pIQM35XRw==} dev: false /polybooljs/1.2.0: resolution: {integrity: sha512-mKjR5nolISvF+q2BtC1fi/llpxBPTQ3wLWN8+ldzdw2Hocpc8C72ZqnamCM4Z6z+68GVVjkeM01WJegQmZ8MEQ==} dev: false - /polytope-closest-point/1.0.0: - resolution: {integrity: sha512-rvmt1e2ci9AUyWeHg+jsNuhGC4eBtxX4WjD9uDdvQzv2I1CVJSgbblJTslNXpGUu4KZSsUtSzvIdHKRKfRF3kw==} - dependencies: - numeric: 1.2.6 - dev: false - /portfinder/1.0.32_supports-color@6.1.0: resolution: {integrity: sha512-on2ZJVVDXRADWE6jnQaX0ioEylzgBpQk8r55NE4wjXW1ZxO+BgDlY6DXwj20i0V8eB4SenDQ00WEaxfiIQPcxg==} engines: {node: '>= 0.12.0'} @@ -13254,6 +12574,16 @@ packages: parse-ms: 2.1.0 dev: false + /probe-image-size/7.2.3: + resolution: {integrity: sha512-HubhG4Rb2UH8YtV4ba0Vp5bQ7L78RTONYu/ujmCu5nBI8wGv24s4E9xSKBi0N1MowRpxk76pFCpJtW0KPzOK0w==} + dependencies: + lodash.merge: 4.6.2 + needle: 2.9.1 + stream-parser: 0.3.1 + transitivePeerDependencies: + - supports-color + dev: false + /process-nextick-args/2.0.1: resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} @@ -13409,12 +12739,6 @@ packages: resolution: {integrity: sha512-qxXIEh4pCGfHICj1mAJQ2/2XVZkjCDTcEgfoSQxc/fYivUZxTkk7L3bDBJSoNrEzXI17oUO5Dp07ktqE5KzczA==} engines: {node: '>=0.6'} - /quat-slerp/1.0.1: - resolution: {integrity: sha512-OTozCDeP5sW7cloGR+aIycctZasBhblk1xdsSGP1Iz5pEwDqyChloTmc96xsDfusFD7GRxwDDu+tpJX0Wa1kJw==} - dependencies: - gl-quat: 1.0.0 - dev: false - /query-string/4.3.4: resolution: {integrity: sha512-O2XLNDBIg1DnTOa+2XrIwSiXEV8h2KImXUnjhhn2+UsvZ+Es2uyd5CCRTNQlDGbzUQOW3aYCBx9rVA6dzsiY7Q==} engines: {node: '>=0.10.0'} @@ -13461,12 +12785,6 @@ packages: resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} engines: {node: '>= 0.6'} - /rat-vec/1.1.1: - resolution: {integrity: sha512-FbxGwkQxmw4Jx41LR7yMOR+g8M9TWCEmf/SUBQVLuK2eh0nThnffF7IUualr3XE2x5F8AdLiCVeSGwXd4snfgg==} - dependencies: - big-rat: 1.0.4 - dev: false - /raw-body/2.5.2: resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} engines: {node: '>= 0.8'} @@ -14103,13 +13421,13 @@ packages: resolution: {integrity: sha512-fBASbA6LnOU9dOU2eW7aQ8xmYBSXUIWr+UmF9b1efZBazGNO+rcXT/icdKnYm2pTwcRylVUYwW7H1PHfLekVzA==} dev: false - /react-plotly.js/2.6.0_f6dluzp62qf57yw3gl4ocsg3e4: + 
/react-plotly.js/2.6.0_qtjenpcawcnnxnr626ndcvhi4u: resolution: {integrity: sha512-g93xcyhAVCSt9kV1svqG1clAEdL6k3U+jjuSzfTV7owaSU9Go6Ph8bl25J+jKfKvIGAEYpe4qj++WHJuc9IaeA==} peerDependencies: plotly.js: '>1.34.0' react: '>0.13.0' dependencies: - plotly.js: 1.58.5 + plotly.js: 2.25.2 prop-types: 15.8.1 react: 16.14.0 dev: false @@ -14425,14 +13743,6 @@ packages: strip-indent: 2.0.0 dev: true - /reduce-simplicial-complex/1.0.0: - resolution: {integrity: sha512-t+nT7sHDtcxBx8TbglqfLsLKoFiSn9hp6GFojJEThHBAFv72wQeq/uRiPYZa4Xb8FR1Ye1foRcBV3Ki6bgm+pQ==} - dependencies: - cell-orientation: 1.0.1 - compare-cell: 1.0.0 - compare-oriented-cell: 1.0.1 - dev: false - /reflect.getprototypeof/1.0.3: resolution: {integrity: sha512-TTAOZpkJ2YLxl7mVHWrNo3iDMEkYlva/kgFcXndqMgbo/AZUmmavEkdXV+hXtE4P8xdyEKRzalaFqZVuwIk/Nw==} engines: {node: '>= 0.4'} @@ -14601,10 +13911,6 @@ packages: regl-scatter2d: 3.2.9 dev: false - /regl/1.7.0: - resolution: {integrity: sha512-bEAtp/qrtKucxXSJkD4ebopFZYP0q1+3Vb2WECWv/T8yQEgKxDxJ7ztO285tAMaYZVR6mM1GgI6CCn8FROtL1w==} - dev: false - /regl/2.1.0: resolution: {integrity: sha512-oWUce/aVoEvW5l2V0LK7O5KJMzUSKeiOwFuJehzpSFd43dO5spP9r+sSUfhKtsky4u6MCqWJaRL+abzExynfTg==} dev: false @@ -14864,78 +14170,6 @@ packages: classnames: 2.3.2 dev: false - /robust-compress/1.0.0: - resolution: {integrity: sha512-E8btSpQ6zZr7LvRLrLvb+N5rwQ0etUbsXFKv5NQj6TVK6RYT00Qg9iVFvIWR+GxXUvpes7FDN0WfXa3l7wtGOw==} - dev: false - - /robust-determinant/1.1.0: - resolution: {integrity: sha512-xva9bx/vyAv3pVYL2++vlnvM9q7oQOeCS5iscmlWtmaXHEgI4GFWeuYPUVVhvmYwx9N49EsQTonVJihYtcMo1Q==} - dependencies: - robust-compress: 1.0.0 - robust-scale: 1.0.2 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-dot-product/1.0.0: - resolution: {integrity: sha512-Nu/wah8B8RotyZLRPdlEL0ZDh3b7wSwUBLdbTHwS/yw0qqjMJ943PSCkd6EsF5R5QFDWF2x77DGsbmnv9/7/ew==} - dependencies: - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-in-sphere/1.2.1: - resolution: {integrity: sha512-3zJdcMIOP1gdwux93MKTS0RiMYEGwQBoE5R1IW/9ZQmGeZzP7f7i4+xdcK8ujJvF/dEOS1WPuI9IB1WNFbj3Cg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-linear-solve/1.0.0: - resolution: {integrity: sha512-I1qW8Bl9+UYeGNh2Vt8cwkcD74xWMyjnU6lSVcZrf0eyfwPmreflY3v0SvqCZOj5ddxnSS1Xp31igbFNcg1TGQ==} - dependencies: - robust-determinant: 1.1.0 - dev: false - - /robust-orientation/1.2.1: - resolution: {integrity: sha512-FuTptgKwY6iNuU15nrIJDLjXzCChWB+T4AvksRtwPS/WZ3HuP1CElCm1t+OBfgQKfWbtZIawip+61k7+buRKAg==} - dependencies: - robust-scale: 1.0.2 - robust-subtract: 1.0.0 - robust-sum: 1.0.0 - two-product: 1.0.2 - dev: false - - /robust-product/1.0.0: - resolution: {integrity: sha512-7ww6m+ICW6Dt7ylHVy1aeeNwTfMXfh2BHqHVNE+CHvrU9sI97Vb6uHnid0MN3I9afTI5DXOB7q4SQa2fxuo2Gw==} - dependencies: - robust-scale: 1.0.2 - robust-sum: 1.0.0 - dev: false - - /robust-scale/1.0.2: - resolution: {integrity: sha512-jBR91a/vomMAzazwpsPTPeuTPPmWBacwA+WYGNKcRGSh6xweuQ2ZbjRZ4v792/bZOhRKXRiQH0F48AvuajY0tQ==} - dependencies: - two-product: 1.0.2 - two-sum: 1.0.0 - dev: false - - /robust-segment-intersect/1.0.1: - resolution: {integrity: sha512-QWngxcL7rCRLK7nTMcTNBPi/q+fecrOo6aOtTPnXjT/Dve5AK20DzUSq2fznUS+rCAxyir6OdPgDCzcUxFtJoQ==} - dependencies: - robust-orientation: 1.2.1 - dev: false - - /robust-subtract/1.0.0: - resolution: {integrity: sha512-xhKUno+Rl+trmxAIVwjQMiVdpF5llxytozXJOdoT4eTIqmqsndQqFb1A0oiW3sZGlhMRhOi6pAD4MF1YYW6o/A==} - dev: false - - /robust-sum/1.0.0: - resolution: {integrity: 
sha512-AvLExwpaqUqD1uwLU6MwzzfRdaI6VEZsyvQ3IAQ0ZJ08v1H+DTyqskrf2ZJyh0BDduFVLN7H04Zmc+qTiahhAw==} - dev: false - /rsvp/4.8.5: resolution: {integrity: sha512-nfMOlASu9OnRJo1mbEk2cz0D56a1MBNrJ7orjRZQG10XDyuvwksKbuXNp6qa+kbn839HwjwhBzhFmdsaEAfauA==} engines: {node: 6.* || >= 7.*} @@ -14997,10 +14231,6 @@ packages: /safer-buffer/2.1.2: resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - /sane-topojson/4.0.0: - resolution: {integrity: sha512-bJILrpBboQfabG3BNnHI2hZl52pbt80BE09u4WhnrmzuF2JbMKZdl62G5glXskJ46p+gxE2IzOwGj/awR4g8AA==} - dev: false - /sane/4.1.0: resolution: {integrity: sha512-hhbzAgTIX8O7SHfp2c8/kREfEn4qO/9q8C9beyY6+tvZ87EpoZ3i1RIEvp27YBswnNbY9mWd6paKVmKbAgLfZA==} engines: {node: 6.* || 8.* || >= 10.*} @@ -15325,10 +14555,6 @@ packages: /signal-exit/3.0.7: resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} - /signum/0.0.0: - resolution: {integrity: sha512-nct2ZUmwemVxeuPY5h+JLpHGJvLCXXNahGVI7IB3a6Fy5baX9AGSb854HceYH4FBw4eGjoZfEo9YRfkGfKdZQA==} - dev: false - /signum/1.0.0: resolution: {integrity: sha512-yodFGwcyt59XRh7w5W3jPcIQb3Bwi21suEfT7MAWnBX3iCdklJpgDgvGT9o04UonglZN5SNMfJFkHIR/jO8GHw==} dev: false @@ -15339,55 +14565,10 @@ packages: is-arrayish: 0.3.2 dev: false - /simplicial-complex-boundary/1.0.1: - resolution: {integrity: sha512-hz/AaVbs+s08EVoxlbCE68AlC6/mxFJLxJrGRMbDoTjz3030nhcOq+w5+f0/ZaU2EYjmwa8CdVKpiRVIrhaZjA==} - dependencies: - boundary-cells: 2.0.2 - reduce-simplicial-complex: 1.0.0 - dev: false - - /simplicial-complex-contour/1.0.2: - resolution: {integrity: sha512-Janyqvpa7jgr9MJbwR/XGyYz7bdhXNq7zgHxD0G54LCRNyn4bf3Hely2iWQeK/IGu3c5BaWFUh7ElxqXhKrq0g==} - dependencies: - marching-simplex-table: 1.0.0 - ndarray: 1.0.19 - ndarray-sort: 1.0.1 - typedarray-pool: 1.2.0 - dev: false - - /simplicial-complex/0.3.3: - resolution: {integrity: sha512-JFSxp7I5yORuKSuwGN96thhkqZVvYB4pkTMkk+PKP2QsOYYU1e84OBoHwOpFyFmjyvB9B3UDZKzHQI5S/CPUPA==} - dependencies: - bit-twiddle: 0.0.2 - union-find: 0.0.4 - dev: false - - /simplicial-complex/1.0.0: - resolution: {integrity: sha512-mHauIKSOy3GquM5VnYEiu7eP5y4A8BiaN9ezUUgyYFz1k68PqDYcyaH3kenp2cyvWZE96QKE3nrxYw65Allqiw==} - dependencies: - bit-twiddle: 1.0.2 - union-find: 1.0.2 - dev: false - - /simplify-planar-graph/2.0.1: - resolution: {integrity: sha512-KdC2ZPFvrGl9+lH/P3Yik7G0si2Zpk6Xiqjq8l9U1lOox5a/9dGLjevi9tvqoh4V7yQbs7fs6+rNCOAdrzUktw==} - dependencies: - robust-orientation: 1.2.1 - simplicial-complex: 0.3.3 - dev: false - /sisteransi/1.0.5: resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} dev: false - /slab-decomposition/1.0.3: - resolution: {integrity: sha512-1EfR304JHvX9vYQkUi4AKqN62mLsjk6W45xTk/TxwN8zd3HGwS7PVj9zj0I6fgCZqfGlimDEY+RzzASHn97ZmQ==} - dependencies: - binary-search-bounds: 2.0.5 - functional-red-black-tree: 1.0.1 - robust-orientation: 1.2.1 - dev: false - /slash/1.0.0: resolution: {integrity: sha512-3TYDR7xWt4dIqV2JauJr+EJeW356RXijHeUlO+8djJ+uBXPn8/2dpzBc8yQhh583sVvc9CvFAeQVgijsH+PNNg==} engines: {node: '>=0.10.0'} @@ -15560,13 +14741,6 @@ packages: - supports-color dev: false - /split-polygon/1.0.0: - resolution: {integrity: sha512-nBFcgQUVEE8dcOjuKaRdlM53k8RxUYpRxZ//n0pHJQGhbVscrsti+gllJI3pK3y7fgFwGWgt7NFhAX5sz0UoWQ==} - dependencies: - robust-dot-product: 1.0.0 - robust-sum: 1.0.0 - dev: false - /split-string/3.1.0: resolution: {integrity: 
sha512-NzNVhJDYpwceVVii8/Hu6DKfD2G+NrQHlS/V/qgv763EYudVwEcMQNxd2lh+0VrUByXN/oJkl5grOhYWvQUYiw==} engines: {node: '>=0.10.0'} @@ -15576,10 +14750,6 @@ packages: /sprintf-js/1.0.3: resolution: {integrity: sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==} - /sprintf-js/1.1.2: - resolution: {integrity: sha512-VE0SOVEHCk7Qc8ulkWw3ntAzXuqf7S2lvwQaDLRnUeIEaKNQJzV6BwmLKhOqT61aGhfUMrXeaBk+oDGCzvhcug==} - dev: false - /sshpk/1.17.0: resolution: {integrity: sha512-/9HIEs1ZXGhSPE8X6Ccm7Nam1z8KcoCqPdI7ecm1N33EzAetWahvQWVqLZtaZQ+IDKX4IyA2o0gBzqIMkAagHQ==} engines: {node: '>=0.10.0'} @@ -15680,6 +14850,14 @@ packages: to-arraybuffer: 1.0.1 xtend: 4.0.2 + /stream-parser/0.3.1: + resolution: {integrity: sha512-bJ/HgKq41nlKvlhccD5kaCr/P+Hu0wPNKPJOH7en+YrJu/9EgqUF+88w5Jb6KNcjOFMhfX4B2asfeAtIGuHObQ==} + dependencies: + debug: 2.6.9 + transitivePeerDependencies: + - supports-color + dev: false + /stream-shift/1.0.1: resolution: {integrity: sha512-AiisoFqQ0vbGcZgQPY1cdP2I76glaVA/RauYR4G4thNFgkTqr90yXTo4LYX60Jl+sIlPNHHdGSwo01AvbKUSVQ==} dev: false @@ -15934,14 +15112,6 @@ packages: resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} - /surface-nets/1.0.2: - resolution: {integrity: sha512-Se+BaCb5yc8AV1IfT6TwTWEe/KuzzjzcMQQCbcIahzk9xRO5bIxxGM2MmKxE9nmq8+RD8DLBLXu0BjXoRs21iw==} - dependencies: - ndarray-extract-contour: 1.0.1 - triangulate-hypercube: 1.0.1 - zero-crossings: 1.0.1 - dev: false - /svg-arc-to-cubic-bezier/3.2.0: resolution: {integrity: sha512-djbJ/vZKZO+gPoSDThGNpKDO+o+bAeA4XQKovvkNCqnIS2t+S4qnLAGQhyyrulhCFRl1WWzAp0wUDV8PpTVU3g==} dev: false @@ -16084,12 +15254,6 @@ packages: require-main-filename: 2.0.0 dev: false - /text-cache/4.2.2: - resolution: {integrity: sha512-zky+UDYiX0a/aPw/YTBD+EzKMlCTu1chFuCMZeAkgoRiceySdROu1V2kJXhCbtEdBhiOviYnAdGiSYl58HW0ZQ==} - dependencies: - vectorize-text: 3.2.2 - dev: false - /text-table/0.2.0: resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} @@ -16241,20 +15405,6 @@ packages: punycode: 2.3.0 dev: false - /triangulate-hypercube/1.0.1: - resolution: {integrity: sha512-SAIacSBfUNfgeCna8q2i+1taOtFJkYuOqpduaJ1KUeOJpqc0lLKMYzPnZb4CA6KCOiD8Pd4YbuVq41wa9dvWyw==} - dependencies: - gamma: 0.1.0 - permutation-parity: 1.0.0 - permutation-rank: 1.0.0 - dev: false - - /triangulate-polyline/1.0.3: - resolution: {integrity: sha512-crJcVFtVPFYQ8r9iIhe9JqkauDvNWDSZLot8ly3DniSCO+zyUfKbtfD3fEoBaA5uMrQU/zBi11NBuVQeSToToQ==} - dependencies: - cdt2d: 1.0.0 - dev: false - /trim-newlines/2.0.0: resolution: {integrity: sha512-MTBWv3jhVjTU7XR3IQHllbiJs8sc75a80OEhB6or/q7pLTWgQ0bMGQXXYQSrSuXe6WiKWDZ5txXY5P59a/coVA==} engines: {node: '>=4'} @@ -16305,25 +15455,9 @@ packages: dependencies: safe-buffer: 5.2.1 - /turntable-camera-controller/3.0.1: - resolution: {integrity: sha512-UOGu9W/Mx053pAaczi0BEPqvWJOqSgtpdigWG9C8dX8rQVdyl2hWmpdJW3m15QrGxJtJHIhhDTHVtTZzPkd/FA==} - dependencies: - filtered-vector: 1.2.5 - gl-mat4: 1.2.0 - gl-vec3: 1.1.3 - dev: false - /tweetnacl/0.14.5: resolution: {integrity: sha512-KXXFFdAbFXY4geFIwoyNK+f5Z1b7swfXABfL7HXCmoIWMKU3dmS26672A4EeQtDzLKy7SXmfBu51JolvEKwtGA==} - /two-product/1.0.2: - resolution: {integrity: sha512-vOyrqmeYvzjToVM08iU52OFocWT6eB/I5LUWYnxeAPGXAhAxXYU/Yr/R2uY5/5n4bvJQL9AQulIuxpIsMoT8XQ==} - dev: false - - /two-sum/1.0.0: - resolution: {integrity: 
sha512-phP48e8AawgsNUjEY2WvoIWqdie8PoiDZGxTDv70LDr01uX5wLEQbOgSP7Z/B6+SW5oLtbe8qaYX2fKJs3CGTw==} - dev: false - /type-check/0.3.2: resolution: {integrity: sha512-ZCmOJdvOWDBYJlzAoFkC+Q0+bUyEOS1ltgp1MGU03fqHG+dbi9tBFU2Rd9QKiDZFAYrhPh2JUf7rZRIuHRKtOg==} engines: {node: '>= 0.8.0'} @@ -16464,14 +15598,6 @@ packages: engines: {node: '>=4'} dev: false - /union-find/0.0.4: - resolution: {integrity: sha512-207oken6EyGDCBK5l/LTPsWfgy8N8s6idwRK2TG0ssWhzPlxEDdBA8nIV+eLbkEMdA8pAwE8F7/xwv2sCESVjQ==} - dev: false - - /union-find/1.0.2: - resolution: {integrity: sha512-wFA9bMD/40k7ZcpKVXfu6X1qD3ri5ryO8HUsuA1RnxPCQl66Mu6DgkxyR+XNnd+osD0aLENixcJVFj+uf+O4gw==} - dev: false - /union-value/1.0.1: resolution: {integrity: sha512-tJfXmxMeWYnczCVs7XAEvIV7ieppALdyepWMkHkwciRpZraG/xwT+s2JN8+pr1+8jCRf80FFzvr+MpQeeoF4Xg==} engines: {node: '>=0.10.0'} @@ -16718,18 +15844,6 @@ packages: resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} engines: {node: '>= 0.8'} - /vectorize-text/3.2.2: - resolution: {integrity: sha512-34NVOCpMMQVXujU4vb/c6u98h6djI0jGdtC202H4Huvzn48B6ARsR7cmGh1xsAc0pHNQiUKGK/aHF05VtGv+eA==} - dependencies: - cdt2d: 1.0.0 - clean-pslg: 1.1.2 - ndarray: 1.0.19 - planar-graph-to-polyline: 1.0.6 - simplify-planar-graph: 2.0.1 - surface-nets: 1.0.2 - triangulate-polyline: 1.0.3 - dev: false - /vendors/1.0.4: resolution: {integrity: sha512-/juG65kTL4Cy2su4P8HjtkTxk6VmJDiOPBufWniqQ6wknac6jNiXS9vU+hO3wgusiyqWlzTbVHi0dyJqRONg3w==} dev: false @@ -16812,10 +15926,6 @@ packages: resolution: {integrity: sha512-lNR9aAefbGPpHO7AEnY0hCFjz1eTkWCXYvkTRrTHs9qv8zJp+SkVYpzfLIFXQQiG3tVvbNFQgVg2bQS8YGgxyw==} dev: false - /weakmap-shim/1.1.1: - resolution: {integrity: sha512-/wNyG+1FpiHhnfQo+TuA/XAUpvOOkKVl0A4qpT+oGcj5SlZCLmM+M1Py/3Sj8sy+YrEauCVITOxCsZKo6sPbQg==} - dev: false - /webgl-context/2.2.0: resolution: {integrity: sha512-q/fGIivtqTT7PEoF07axFIlHNk/XCPaYpq64btnepopSWvKNFkoORlQYgqDigBIuGA1ExnFd/GnSUnBNEPQY7Q==} dependencies: @@ -17461,9 +16571,3 @@ packages: y18n: 4.0.3 yargs-parser: 15.0.3 dev: true - - /zero-crossings/1.0.1: - resolution: {integrity: sha512-iNIldMZaDtAyIJMJ8NnGVHeejH//y4eVmpXriM+q/B/BPNz+2E7oAgSnw9MXqCd3RbQ8W+hor7T2jEyRoc/s2A==} - dependencies: - cwise-compiler: 1.1.3 - dev: false From 727ef2c988f6dd689b6fd182fad836d623d8fa8e Mon Sep 17 00:00:00 2001 From: SaketaChalamchala Date: Fri, 12 Apr 2024 14:40:31 +0800 Subject: [PATCH 14/17] HDDS-10630. 
Add missing parent directories deleted between initiate and complete MPU (#6496) (cherry picked from commit c1b27a87b57442f8c765fcd7e60802d72a16a932) --- .../S3MultipartUploadCompleteRequest.java | 94 +++++++++++++++++-- ...MultipartUploadCompleteRequestWithFSO.java | 39 +++++++- .../S3MultipartUploadCompleteResponse.java | 8 ++ ...ultipartUploadCompleteResponseWithFSO.java | 45 ++++++++- .../s3/multipart/TestS3MultipartResponse.java | 5 +- 5 files changed, 180 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 451599dee8ea..7cecc97ca4f0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -23,6 +23,7 @@ import java.io.IOException; import java.nio.file.InvalidPathException; +import java.nio.file.Paths; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -31,6 +32,10 @@ import java.util.function.BiFunction; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.om.OzoneConfigUtil; +import org.apache.hadoop.ozone.om.request.file.OMDirectoryCreateRequestWithFSO; +import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.protocolPB.OMPBHelper; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -40,6 +45,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; @@ -180,11 +186,62 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - String ozoneKey = omMetadataManager.getOzoneKey( - volumeName, bucketName, keyName); - - String dbOzoneKey = - getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName); + List missingParentInfos; + OMFileRequest.OMPathInfoWithFSO pathInfoFSO = OMFileRequest + .verifyDirectoryKeysInPath(omMetadataManager, volumeName, bucketName, + keyName, Paths.get(keyName)); + missingParentInfos = OMDirectoryCreateRequestWithFSO + .getAllMissingParentDirInfo(ozoneManager, keyArgs, omBucketInfo, + pathInfoFSO, trxnLogIndex); + + if (missingParentInfos != null) { + final long volumeId = omMetadataManager.getVolumeId(volumeName); + final long bucketId = omMetadataManager.getBucketId(volumeName, + bucketName); + + // add all missing parents to directory table + addMissingParentsToCache(omBucketInfo, missingParentInfos, + omMetadataManager, volumeId, bucketId, trxnLogIndex); + + String multipartOpenKey = omMetadataManager + .getMultipartKey(volumeId, bucketId, + pathInfoFSO.getLastKnownParentId(), + pathInfoFSO.getLeafNodeName(), + keyArgs.getMultipartUploadID()); + + if (getOmKeyInfoFromOpenKeyTable(multipartOpenKey, + keyName, omMetadataManager) == 
null) { + + final ReplicationConfig replicationConfig = OzoneConfigUtil + .resolveReplicationConfigPreference(keyArgs.getType(), + keyArgs.getFactor(), keyArgs.getEcReplicationConfig(), + omBucketInfo != null ? + omBucketInfo.getDefaultReplicationConfig() : + null, ozoneManager); + + OmKeyInfo keyInfoFromArgs = new OmKeyInfo.Builder() + .setVolumeName(volumeName) + .setBucketName(bucketName) + .setKeyName(keyName) + .setCreationTime(keyArgs.getModificationTime()) + .setModificationTime(keyArgs.getModificationTime()) + .setReplicationConfig(replicationConfig) + .setOmKeyLocationInfos(Collections.singletonList( + new OmKeyLocationInfoGroup(0, new ArrayList<>(), true))) + .setAcls(getAclsForKey(keyArgs, omBucketInfo, pathInfoFSO, + ozoneManager.getPrefixManager())) + .setObjectID(pathInfoFSO.getLeafNodeObjectId()) + .setUpdateID(trxnLogIndex) + .setFileEncryptionInfo(keyArgs.hasFileEncryptionInfo() ? + OMPBHelper.convert(keyArgs.getFileEncryptionInfo()) : null) + .setParentObjectID(pathInfoFSO.getLastKnownParentId()) + .build(); + + // Add missing multi part info to open key table + addMultiPartToCache(omMetadataManager, multipartOpenKey, + pathInfoFSO, keyInfoFromArgs, trxnLogIndex); + } + } String dbMultipartOpenKey = getDBMultipartOpenKey(volumeName, bucketName, keyName, uploadID, @@ -193,6 +250,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, OmMultipartKeyInfo multipartKeyInfo = omMetadataManager .getMultipartInfoTable().get(multipartKey); + String ozoneKey = omMetadataManager.getOzoneKey( + volumeName, bucketName, keyName); + + String dbOzoneKey = + getDBOzoneKey(omMetadataManager, volumeName, bucketName, keyName); + // Check for directory exists with same name for the LEGACY_FS, // if it exists throw error. checkDirectoryAlreadyExists(ozoneManager, omBucketInfo, keyName, @@ -288,7 +351,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = getOmClientResponse(multipartKey, omResponse, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, omBucketInfo, - volumeId, bucketId); + volumeId, bucketId, missingParentInfos, multipartKeyInfo); result = Result.SUCCESS; } else { @@ -329,7 +392,8 @@ protected OMClientResponse getOmClientResponse(String multipartKey, OMResponse.Builder omResponse, String dbMultipartOpenKey, OmKeyInfo omKeyInfo, List allKeyInfoToRemove, OmBucketInfo omBucketInfo, - long volumeId, long bucketId) { + long volumeId, long bucketId, List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { return new S3MultipartUploadCompleteResponse(omResponse.build(), multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, @@ -468,6 +532,22 @@ protected String getDBOzoneKey(OMMetadataManager omMetadataManager, return omMetadataManager.getOzoneKey(volumeName, bucketName, keyName); } + protected void addMissingParentsToCache(OmBucketInfo omBucketInfo, + List missingParentInfos, + OMMetadataManager omMetadataManager, + long volumeId, long bucketId, long transactionLogIndex + ) throws IOException { + // FSO is disabled. Do nothing. + } + + protected void addMultiPartToCache( + OMMetadataManager omMetadataManager, String multipartOpenKey, + OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo, + long transactionLogIndex + ) throws IOException { + // FSO is disabled. Do nothing. 
+ } + protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneKey, String keyName, OMMetadataManager omMetadataManager) throws IOException { return omMetadataManager.getKeyTable(getBucketLayout()).get(dbOzoneKey); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java index 35867bb84e85..811072bc5ea0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequestWithFSO.java @@ -23,7 +23,9 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.om.response.s3.multipart.S3MultipartUploadCompleteResponse; @@ -74,6 +76,37 @@ protected void checkDirectoryAlreadyExists(OzoneManager ozoneManager, } } + @Override + protected void addMissingParentsToCache(OmBucketInfo omBucketInfo, + List missingParentInfos, + OMMetadataManager omMetadataManager, long volumeId, long bucketId, + long transactionLogIndex) throws IOException { + + // validate and update namespace for missing parent directory. + checkBucketQuotaInNamespace(omBucketInfo, missingParentInfos.size()); + omBucketInfo.incrUsedNamespace(missingParentInfos.size()); + + // Add cache entries for the missing parent directories. 
+ OMFileRequest.addDirectoryTableCacheEntries(omMetadataManager, + volumeId, bucketId, transactionLogIndex, + missingParentInfos, null); + } + + @Override + protected void addMultiPartToCache( + OMMetadataManager omMetadataManager, String multipartOpenKey, + OMFileRequest.OMPathInfoWithFSO pathInfoFSO, OmKeyInfo omKeyInfo, + long transactionLogIndex + ) throws IOException { + + // Add multi part to cache + OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, + multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), + transactionLogIndex); + + } + + @Override protected OmKeyInfo getOmKeyInfoFromKeyTable(String dbOzoneFileKey, String keyName, OMMetadataManager omMetadataManager) throws IOException { @@ -162,11 +195,13 @@ protected OMClientResponse getOmClientResponse(String multipartKey, OzoneManagerProtocolProtos.OMResponse.Builder omResponse, String dbMultipartOpenKey, OmKeyInfo omKeyInfo, List allKeyInfoToRemove, OmBucketInfo omBucketInfo, - long volumeId, long bucketId) { + long volumeId, long bucketId, List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { return new S3MultipartUploadCompleteResponseWithFSO(omResponse.build(), multipartKey, dbMultipartOpenKey, omKeyInfo, allKeyInfoToRemove, - getBucketLayout(), omBucketInfo, volumeId, bucketId); + getBucketLayout(), omBucketInfo, volumeId, bucketId, + missingParentInfos, multipartKeyInfo); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index 829457cd4bd0..2c250362fe7f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -131,4 +131,12 @@ protected String addToKeyTable(OMMetadataManager omMetadataManager, protected OmKeyInfo getOmKeyInfo() { return omKeyInfo; } + + protected OmBucketInfo getOmBucketInfo() { + return omBucketInfo; + } + + protected String getMultiPartKey() { + return multipartKey; + } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 3a6e1e39d56b..fe279eaf24ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -22,7 +22,9 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.response.CleanupTableInfo; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; @@ -53,6 +55,10 @@ public class S3MultipartUploadCompleteResponseWithFSO private long volumeId; private 
long bucketId; + private List missingParentInfos; + + private OmMultipartKeyInfo multipartKeyInfo; + @SuppressWarnings("checkstyle:ParameterNumber") public S3MultipartUploadCompleteResponseWithFSO( @Nonnull OMResponse omResponse, @@ -62,11 +68,15 @@ public S3MultipartUploadCompleteResponseWithFSO( @Nonnull List allKeyInfoToRemove, @Nonnull BucketLayout bucketLayout, @CheckForNull OmBucketInfo omBucketInfo, - @Nonnull long volumeId, @Nonnull long bucketId) { + @Nonnull long volumeId, @Nonnull long bucketId, + List missingParentInfos, + OmMultipartKeyInfo multipartKeyInfo) { super(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, bucketLayout, omBucketInfo); this.volumeId = volumeId; this.bucketId = bucketId; + this.missingParentInfos = missingParentInfos; + this.multipartKeyInfo = multipartKeyInfo; } /** @@ -79,6 +89,39 @@ public S3MultipartUploadCompleteResponseWithFSO( checkStatusNotOK(); } + @Override + public void addToDBBatch(OMMetadataManager omMetadataManager, + BatchOperation batchOperation) throws IOException { + if (missingParentInfos != null) { + // Create missing parent directory entries. + for (OmDirectoryInfo parentDirInfo : missingParentInfos) { + final String parentKey = omMetadataManager.getOzonePathKey( + volumeId, bucketId, parentDirInfo.getParentObjectID(), + parentDirInfo.getName()); + omMetadataManager.getDirectoryTable().putWithBatch(batchOperation, + parentKey, parentDirInfo); + } + + // namespace quota changes for parent directory + String bucketKey = omMetadataManager.getBucketKey( + getOmBucketInfo().getVolumeName(), + getOmBucketInfo().getBucketName()); + omMetadataManager.getBucketTable().putWithBatch(batchOperation, + bucketKey, getOmBucketInfo()); + + if (OMFileRequest.getOmKeyInfoFromFileTable(true, + omMetadataManager, getMultiPartKey(), getOmKeyInfo().getKeyName()) + != null) { + // Add multi part to open key table. 
+ OMFileRequest.addToOpenFileTableForMultipart(omMetadataManager, + batchOperation, + getOmKeyInfo(), multipartKeyInfo.getUploadID(), volumeId, + bucketId); + } + } + super.addToDBBatch(omMetadataManager, batchOperation); + } + @Override protected String addToKeyTable(OMMetadataManager omMetadataManager, BatchOperation batchOperation) throws IOException { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java index 51963a00a1cb..a2192ddb880c 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartResponse.java @@ -306,6 +306,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( String multipartKey = omMetadataManager .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); + OmMultipartKeyInfo multipartKeyInfo = omMetadataManager + .getMultipartInfoTable().get(multipartKey); final long volumeId = omMetadataManager.getVolumeId(volumeName); final long bucketId = omMetadataManager.getBucketId(volumeName, @@ -324,7 +326,8 @@ public S3MultipartUploadCompleteResponse createS3CompleteMPUResponseFSO( return new S3MultipartUploadCompleteResponseWithFSO(omResponse, multipartKey, multipartOpenKey, omKeyInfo, allKeyInfoToRemove, - getBucketLayout(), omBucketInfo, volumeId, bucketId); + getBucketLayout(), omBucketInfo, volumeId, bucketId, null, + multipartKeyInfo); } protected S3InitiateMultipartUploadResponse getS3InitiateMultipartUploadResp( From 966fe53ea312428c73bafbbb344d5c91ce852500 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 17 Apr 2024 06:10:26 +0200 Subject: [PATCH 15/17] HDDS-10692. 
ozone s3 getsecret prints some internal details (#6531) (cherry picked from commit 72240fa7263993426f580cb8b807a47843f0fccc) --- .../org/apache/hadoop/ozone/om/helpers/S3SecretValue.java | 4 +--- hadoop-ozone/dist/src/main/smoketest/security/S3-secret.robot | 2 ++ 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java index cb1ed0976a08..20c145bd0c06 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/S3SecretValue.java @@ -101,9 +101,7 @@ public S3Secret getProtobuf() { @Override public String toString() { - return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret + - "\nisDeleted=" + isDeleted + "\ntransactionLogIndex=" + - transactionLogIndex; + return "awsAccessKey=" + kerberosID + "\nawsSecret=" + awsSecret; } @Override diff --git a/hadoop-ozone/dist/src/main/smoketest/security/S3-secret.robot b/hadoop-ozone/dist/src/main/smoketest/security/S3-secret.robot index 2f1eda13e206..9d42fa154e24 100644 --- a/hadoop-ozone/dist/src/main/smoketest/security/S3-secret.robot +++ b/hadoop-ozone/dist/src/main/smoketest/security/S3-secret.robot @@ -26,6 +26,8 @@ GetSecret success ${output}= Execute ozone s3 getsecret -u testuser2 Should contain ${output} awsAccessKey Should contain ${output} awsSecret + Should not contain ${output} isDeleted + Should not contain ${output} transactionLogIndex GetSecret failure ${output2}= Execute and Ignore Error ozone s3 getsecret -u testuser2 From 78cfb9a7b0627ef4b31805cce2550fcc6dc58c36 Mon Sep 17 00:00:00 2001 From: Sammi Chen Date: Wed, 17 Apr 2024 16:44:03 +0800 Subject: [PATCH 16/17] HDDS-10680. Duplicate delete key blocks sent to SCM (#6513) (cherry picked from commit 7bb79380623703df7e58c67887972ce2834a7f7f) --- .../service/AbstractKeyDeletingService.java | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index 21ad0872769a..429e286287c1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -116,7 +116,10 @@ protected int processKeyDeletes(List keyBlocksList, } List blockDeletionResults = scmClient.deleteKeyBlocks(keyBlocksList); + LOG.info("{} BlockGroup deletion are acked by SCM in {} ms", + keyBlocksList.size(), Time.monotonicNow() - startTime); if (blockDeletionResults != null) { + startTime = Time.monotonicNow(); if (isRatisEnabled()) { delCount = submitPurgeKeysRequest(blockDeletionResults, keysToModify, snapTableKey); @@ -126,11 +129,8 @@ protected int processKeyDeletes(List keyBlocksList, // OMRequest model. 
delCount = deleteAllKeys(blockDeletionResults, manager); } - if (LOG.isDebugEnabled()) { - LOG.debug("Blocks for {} (out of {}) keys are deleted in {} ms", - delCount, blockDeletionResults.size(), - Time.monotonicNow() - startTime); - } + LOG.info("Blocks for {} (out of {}) keys are deleted from DB in {} ms", + delCount, blockDeletionResults.size(), Time.monotonicNow() - startTime); } return delCount; } @@ -277,12 +277,14 @@ protected RaftClientRequest createRaftClientRequestForPurge( * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. */ - private void addToMap(Map, List> map, - String objectKey) { + private void addToMap(Map, List> map, String objectKey) { // Parse volume and bucket name String[] split = objectKey.split(OM_KEY_PREFIX); - Preconditions.assertTrue(split.length > 3, "Volume and/or Bucket Name " + - "missing from Key Name."); + Preconditions.assertTrue(split.length >= 3, "Volume and/or Bucket Name " + + "missing from Key Name " + objectKey); + if (split.length == 3) { + LOG.warn("{} missing Key Name", objectKey); + } Pair volumeBucketPair = Pair.of(split[1], split[2]); if (!map.containsKey(volumeBucketPair)) { map.put(volumeBucketPair, new ArrayList<>()); From c6f9084717aba75f256cb590f4fd1c461a56d5e6 Mon Sep 17 00:00:00 2001 From: Ivan Andika Date: Fri, 19 Apr 2024 23:57:55 +0800 Subject: [PATCH 17/17] HDDS-10717. nodeFailureTimeoutMs should be initialized before syncTimeoutRetry (#6560) (cherry picked from commit 5dbd3cfe302477adf277ab69ccf98b66d606d607) --- .../common/transport/server/ratis/XceiverServerRatis.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java index 4688ce4b2789..c47f80f3b421 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java @@ -173,7 +173,6 @@ private XceiverServerRatis(DatanodeDetails dd, this.streamEnable = conf.getBoolean( OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED, OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLED_DEFAULT); - RaftProperties serverProperties = newRaftProperties(); this.context = context; this.dispatcher = dispatcher; this.containerController = containerController; @@ -184,6 +183,7 @@ private XceiverServerRatis(DatanodeDetails dd, shouldDeleteRatisLogDirectory = ratisServerConfig.shouldDeleteRatisLogDirectory(); + RaftProperties serverProperties = newRaftProperties(); this.server = RaftServer.newBuilder().setServerId(raftPeerId) .setProperties(serverProperties)