From 95739ea66c1dfc639b149acd59658220c71232a7 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Fri, 27 Mar 2020 17:22:40 +0800 Subject: [PATCH 01/51] HDDS-3185 Construct a standalone ratis server for SCM. (#720) Contributed-by: Li Cheng --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 87 ++++ .../org/apache/hadoop/ozone/OzoneConsts.java | 5 +- .../src/main/resources/ozone-default.xml | 180 +++++++ .../apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 37 ++ .../hadoop/hdds/scm/ha/SCMNodeDetails.java | 169 +++++++ .../hadoop/hdds/scm/ha/package-info.java | 22 + .../hadoop/hdds/scm/ratis/SCMRatisServer.java | 461 ++++++++++++++++++ .../hdds/scm/ratis/SCMStateMachine.java | 35 ++ .../scm/server/StorageContainerManager.java | 29 +- .../hdds/scm/ratis/TestSCMRatisServer.java | 158 ++++++ .../ozone/TestOzoneConfigurationFields.java | 3 + 11 files changed, 1183 insertions(+), 3 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c6b1100a6c8b..37a1833e5809 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -274,6 +274,16 @@ public final class ScmConfigKeys { // able to send back a new list to the datanodes. public static final String OZONE_SCM_NAMES = "ozone.scm.names"; + public static final String OZONE_SCM_INTERNAL_SERVICE_ID = + "ozone.scm.internal.service.id"; + + public static final String OZONE_SCM_SERVICE_IDS_KEY = + "ozone.scm.service.ids"; + public static final String OZONE_SCM_NODES_KEY = + "ozone.scm.nodes"; + public static final String OZONE_SCM_NODE_ID_KEY = + "ozone.scm.node.id"; + public static final int OZONE_SCM_DEFAULT_PORT = OZONE_SCM_DATANODE_PORT_DEFAULT; // The path where datanode ID is to be written to. @@ -357,6 +367,83 @@ public final class ScmConfigKeys { public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled"; public static final boolean HDDS_TRACING_ENABLED_DEFAULT = false; + // SCM Ratis related + public static final String OZONE_SCM_HA_ENABLE_KEY + = "ozone.scm.ratis.enable"; + public static final boolean OZONE_SCM_HA_ENABLE_DEFAULT + = false; + public static final String OZONE_SCM_RATIS_PORT_KEY + = "ozone.scm.ratis.port"; + public static final int OZONE_SCM_RATIS_PORT_DEFAULT + = 9864; + public static final String OZONE_SCM_RATIS_RPC_TYPE_KEY + = "ozone.scm.ratis.rpc.type"; + public static final String OZONE_SCM_RATIS_RPC_TYPE_DEFAULT + = "GRPC"; + + // SCM Ratis Log configurations + public static final String OZONE_SCM_RATIS_STORAGE_DIR + = "ozone.scm.ratis.storage.dir"; + public static final String OZONE_SCM_RATIS_SEGMENT_SIZE_KEY + = "ozone.scm.ratis.segment.size"; + public static final String OZONE_SCM_RATIS_SEGMENT_SIZE_DEFAULT + = "16KB"; + public static final String OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY + = "ozone.scm.ratis.segment.preallocated.size"; + public static final String OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT + = "16KB"; + + // SCM Ratis Log Appender configurations + public static final String + OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS = + "ozone.scm.ratis.log.appender.queue.num-elements"; + public static final int + OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT = 1024; + public static final String OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT = + "ozone.scm.ratis.log.appender.queue.byte-limit"; + public static final String + OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT = "32MB"; + public static final String OZONE_SCM_RATIS_LOG_PURGE_GAP = + "ozone.scm.ratis.log.purge.gap"; + public static final int OZONE_SCM_RATIS_LOG_PURGE_GAP_DEFAULT = 1000000; + + // SCM Ratis server configurations + public static final String OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_KEY + = "ozone.scm.ratis.server.request.timeout"; + public static final TimeDuration + OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT + = TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS); + public static final String + OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY + = "ozone.scm.ratis.server.retry.cache.timeout"; + public static final TimeDuration + OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT + = TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS); + public static final String OZONE_SCM_RATIS_MINIMUM_TIMEOUT_KEY + = "ozone.scm.ratis.minimum.timeout"; + public static final TimeDuration OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT + = TimeDuration.valueOf(1, TimeUnit.SECONDS); + + // SCM Ratis Leader Election configurations + public static final String + OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = + "ozone.scm.leader.election.minimum.timeout.duration"; + public static final TimeDuration + OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = + TimeDuration.valueOf(1, TimeUnit.SECONDS); + public static final String OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY + = "ozone.scm.ratis.server.failure.timeout.duration"; + public static final TimeDuration + OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT + = TimeDuration.valueOf(120, TimeUnit.SECONDS); + + // SCM Leader server role check interval + public static final String OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY + = "ozone.scm.ratis.server.role.check.interval"; + public static final TimeDuration + OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT + = TimeDuration.valueOf(15, TimeUnit.SECONDS); + /** * Never constructed. */ diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 5a1f9152a709..8343dadeb5f9 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -345,10 +345,11 @@ private OzoneConsts() { public static final String GDPR_LENGTH = "length"; public static final String GDPR_SECRET = "secret"; public static final String GDPR_ALGORITHM = "algorithm"; - - + // Transaction Info public static final String TRANSACTION_INFO_KEY = "#TRANSACTIONINFO"; public static final String TRANSACTION_INFO_SPLIT_KEY = "#"; + // SCM HA + public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; } diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 79f3bcb5808b..cd350989be85 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1908,6 +1908,186 @@ OZONE, HDDS, SECURITY SCM security server port. + + ozone.scm.service.ids + + OZONE, SCM, HA + + Comma-separated list of SCM service Ids. This property allows the client + to figure out quorum of OzoneManager address. + + + + ozone.scm.internal.service.id + + OZONE, SCM, HA + + Service ID of the SCM. If this is not set fall back to + ozone.scm.service.ids to find the service ID it belongs to. + + + + ozone.scm.nodes.EXAMPLESCMSERVICEID + + OZONE, SCM, HA + + Comma-separated list of SCM node Ids for a given SCM service ID (eg. + EXAMPLESCMSERVICEID). The SCM service ID should be the value (one of the + values if there are multiple) set for the parameter ozone.scm.service.ids. + + Unique identifiers for each SCM Node, delimited by commas. This will be + used by SCMs in HA setup to determine all the SCMs + belonging to the same SCM in the cluster. For example, if you + used “scmService1” as the SCM service ID previously, and you wanted to + use “scm1”, “scm2” and "scm3" as the individual IDs of the SCMs, + you would configure a property ozone.scm.nodes.scmService1, and its value + "scm1,scm2,scm3". + + + + ozone.scm.node.id + + OZONE, SCM, HA + + The ID of this SCM node. If the SCM node ID is not configured it + is determined automatically by matching the local node's address + with the configured address. + + If node ID is not deterministic from the configuration, then it is set + to the scmId from the SCM version file. + + + + ozone.scm.ratis.enable + false + OZONE, SCM, HA, RATIS + Property to enable or disable Ratis server on SCM. + Please note - this is a temporary property to disable SCM Ratis server. + + + + + ozone.scm.ratis.port + 9872 + OZONE, SCM, HA, RATIS + + The port number of the SCM's Ratis server. + + + + + ozone.scm.ratis.rpc.type + GRPC + OZONE, SCM, HA, RATIS + Ratis supports different kinds of transports like netty, GRPC, + Hadoop RPC etc. This picks one of those for this cluster. + + + + + ozone.scm.ratis.storage.dir + + OZONE, SCM, HA, RATIS, STORAGE + This directory is used for storing SCM's Ratis metadata like + logs. If this is not set then default metadata dirs is used. A warning + will be logged if this not set. Ideally, this should be mapped to a + fast disk like an SSD. + If undefined, SCM ratis storage dir will fallback to ozone.metadata.dirs. + This fallback approach is not recommended for production environments. + + + + + ozone.scm.ratis.segment.size + 16KB + OZONE, SCM, HA, RATIS, PERFORMANCE + The size of the raft segment used by Apache Ratis on SCM. + (16 KB by default) + + + + + ozone.scm.ratis.segment.preallocated.size + 16KB + OZONE, SCM, HA, RATIS, PERFORMANCE + The size of the buffer which is preallocated for raft segment + used by Apache Ratis on SCM.(16 KB by default) + + + + + ozone.scm.ratis.log.appender.queue.num-elements + 1024 + OZONE, DEBUG, SCM, HA, RATIS + Number of operation pending with Raft's Log Worker. + + + + ozone.scm.ratis.log.appender.queue.byte-limit + 32MB + OZONE, DEBUG, SCM, HA, RATIS + Byte limit for Raft's Log Worker queue. + + + + ozone.scm.ratis.log.purge.gap + 1000000 + OZONE, SCM, HA, RATIS + The minimum gap between log indices for Raft server to purge + its log segments after taking snapshot. + + + + ozone.scm.ratis.server.request.timeout + 3s + OZONE, SCM, HA, RATIS + The timeout duration for SCM's ratis server request . + + + + ozone.scm.ratis.server.retry.cache.timeout + 600000ms + OZONE, SCM, HA, RATIS + Retry Cache entry timeout for SCM's ratis server. + + + + ozone.scm.ratis.minimum.timeout + 1s + OZONE, SCM, HA, RATIS + The minimum timeout duration for SCM's Ratis server rpc. + + + + + ozone.scm.leader.election.minimum.timeout.duration + 1s + OZONE, SCM, HA, RATIS + The minimum timeout duration for SCM ratis leader election. + Default is 1s. + + + + + ozone.scm.ratis.server.failure.timeout.duration + 120s + OZONE, SCM, HA, RATIS + The timeout duration for ratis server failure detection, + once the threshold has reached, the ratis state machine will be informed + about the failure in the ratis ring. + + + + + ozone.scm.ratis.server.role.check.interval + 15s + OZONE, SCM, HA, RATIS + The interval between SCM leader performing a role + check on its ratis server. Ratis server informs SCM if it + loses the leader role. The scheduled check is an secondary + check to ensure that the leader role is updated periodically + . + hdds.metadata.dir diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java new file mode 100644 index 000000000000..c0364adb1b16 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; + +/** + * Utility class used by SCM HA. + */ +public final class SCMHAUtils { + private SCMHAUtils() { + // not used + } + + // Check if SCM HA is enabled. + public static boolean isSCMHAEnabled(OzoneConfiguration conf) { + return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, + ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java new file mode 100644 index 000000000000..8d66187ee748 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java @@ -0,0 +1,169 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.ozone.OzoneConsts; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID; + +/** + * Construct SCM node details. + */ +public final class SCMNodeDetails { + private String scmServiceId; + private String scmNodeId; + private InetSocketAddress rpcAddress; + private int rpcPort; + private int ratisPort; + private String httpAddress; + private String httpsAddress; + + public static final Logger LOG = + LoggerFactory.getLogger(SCMNodeDetails.class); + + /** + * Constructs SCMNodeDetails object. + */ + private SCMNodeDetails(String serviceId, String nodeId, + InetSocketAddress rpcAddr, int rpcPort, int ratisPort, + String httpAddress, String httpsAddress) { + this.scmServiceId = serviceId; + this.scmNodeId = nodeId; + this.rpcAddress = rpcAddr; + this.rpcPort = rpcPort; + this.ratisPort = ratisPort; + this.httpAddress = httpAddress; + this.httpsAddress = httpsAddress; + } + + @Override + public String toString() { + return "SCMNodeDetails[" + + "scmServiceId=" + scmServiceId + + ", scmNodeId=" + scmNodeId + + ", rpcAddress=" + rpcAddress + + ", rpcPort=" + rpcPort + + ", ratisPort=" + ratisPort + + ", httpAddress=" + httpAddress + + ", httpsAddress=" + httpsAddress + + "]"; + } + + /** + * Builder class for SCMNodeDetails. + */ + public static class Builder { + private String scmServiceId; + private String scmNodeId; + private InetSocketAddress rpcAddress; + private int rpcPort; + private int ratisPort; + private String httpAddr; + private String httpsAddr; + + public Builder setRpcAddress(InetSocketAddress rpcAddr) { + this.rpcAddress = rpcAddr; + this.rpcPort = rpcAddress.getPort(); + return this; + } + + public Builder setRatisPort(int port) { + this.ratisPort = port; + return this; + } + + public Builder setSCMServiceId(String serviceId) { + this.scmServiceId = serviceId; + return this; + } + + public Builder setSCMNodeId(String nodeId) { + this.scmNodeId = nodeId; + return this; + } + + public Builder setHttpAddress(String httpAddress) { + this.httpAddr = httpAddress; + return this; + } + + public Builder setHttpsAddress(String httpsAddress) { + this.httpsAddr = httpsAddress; + return this; + } + + public SCMNodeDetails build() { + return new SCMNodeDetails(scmServiceId, scmNodeId, rpcAddress, rpcPort, + ratisPort, httpAddr, httpsAddr); + } + } + + public String getSCMServiceId() { + return scmServiceId; + } + + public String getSCMNodeId() { + return scmNodeId; + } + + public InetSocketAddress getRpcAddress() { + return rpcAddress; + } + + public InetAddress getAddress() { + return rpcAddress.getAddress(); + } + + public int getRatisPort() { + return ratisPort; + } + + public int getRpcPort() { + return rpcPort; + } + + public String getRpcAddressString() { + return NetUtils.getHostPortString(rpcAddress); + } + + public static SCMNodeDetails initStandAlone( + OzoneConfiguration conf) throws IOException { + String localSCMServiceId = conf.getTrimmed(OZONE_SCM_INTERNAL_SERVICE_ID); + int ratisPort = conf.getInt( + ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT); + InetSocketAddress rpcAddress = new InetSocketAddress( + InetAddress.getLocalHost(), 0); + SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder() + .setRatisPort(ratisPort) + .setRpcAddress(rpcAddress) + .setSCMNodeId(localSCMServiceId) + .setSCMServiceId(OzoneConsts.SCM_SERVICE_ID_DEFAULT) + .build(); + return scmNodeDetails; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java new file mode 100644 index 000000000000..06fe1685717d --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.ha; + +/** + * This package contains classes related to SCM HA. + */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java new file mode 100644 index 000000000000..af1e5c2c30ba --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java @@ -0,0 +1,461 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ratis; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Strings; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.ratis.RaftConfigKeys; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.grpc.GrpcConfigKeys; +import org.apache.ratis.netty.NettyConfigKeys; +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.GroupInfoReply; +import org.apache.ratis.protocol.GroupInfoRequest; +import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.rpc.RpcType; +import org.apache.ratis.rpc.SupportedRpcType; +import org.apache.ratis.server.RaftServer; +import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.util.LifeCycle; +import org.apache.ratis.util.SizeInBytes; +import org.apache.ratis.util.TimeDuration; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.File; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +/** + * Class for SCM Ratis Server. + */ +public final class SCMRatisServer { + private static final Logger LOG = LoggerFactory + .getLogger(SCMRatisServer.class); + + private final StorageContainerManager scm; + private final SCMStateMachine scmStateMachine; + + private final int port; + private final InetSocketAddress scmRatisAddress; + private final RaftServer server; + private final RaftGroupId raftGroupId; + private final RaftGroup raftGroup; + private final RaftPeerId raftPeerId; + + private final ClientId clientId = ClientId.randomId(); + private final ScheduledExecutorService scheduledRoleChecker; + private long roleCheckInitialDelayMs = 1000; // 1 second default + private long roleCheckIntervalMs; + private ReentrantReadWriteLock roleCheckLock = new ReentrantReadWriteLock(); + private Optional cachedPeerRole = Optional.empty(); + private Optional cachedLeaderPeerId = Optional.empty(); + + private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); + private static long nextCallId() { + return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; + } + + private SCMRatisServer(Configuration conf, + StorageContainerManager scm, + String raftGroupIdStr, RaftPeerId localRaftPeerId, + InetSocketAddress addr, List raftPeers) + throws IOException { + this.scm = scm; + this.scmRatisAddress = addr; + this.port = addr.getPort(); + RaftProperties serverProperties = newRaftProperties(conf); + + this.raftPeerId = localRaftPeerId; + this.raftGroupId = RaftGroupId.valueOf( + getRaftGroupIdFromOmServiceId(raftGroupIdStr)); + this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); + + StringBuilder raftPeersStr = new StringBuilder(); + for (RaftPeer peer : raftPeers) { + raftPeersStr.append(", ").append(peer.getAddress()); + } + LOG.info("Instantiating SCM Ratis server with GroupID: {} and " + + "Raft Peers: {}", raftGroupIdStr, raftPeersStr.toString().substring(2)); + this.scmStateMachine = getStateMachine(); + + this.server = RaftServer.newBuilder() + .setServerId(this.raftPeerId) + .setGroup(this.raftGroup) + .setProperties(serverProperties) + .setStateMachine(scmStateMachine) + .build(); + + // Run a scheduler to check and update the server role on the leader + // periodically + this.scheduledRoleChecker = Executors.newSingleThreadScheduledExecutor(); + this.scheduledRoleChecker.scheduleWithFixedDelay(new Runnable() { + @Override + public void run() { + // Run this check only on the leader OM + if (cachedPeerRole.isPresent() && + cachedPeerRole.get() == RaftProtos.RaftPeerRole.LEADER) { + updateServerRole(); + } + } + }, roleCheckInitialDelayMs, roleCheckIntervalMs, TimeUnit.MILLISECONDS); + } + + public static SCMRatisServer newSCMRatisServer( + Configuration conf, StorageContainerManager scm, + SCMNodeDetails scmNodeDetails, List peers) + throws IOException { + String scmServiceId = scmNodeDetails.getSCMServiceId(); + + String scmNodeId = scmNodeDetails.getSCMNodeId(); + RaftPeerId localRaftPeerId = RaftPeerId.getRaftPeerId(scmNodeId); + InetSocketAddress ratisAddr = new InetSocketAddress( + scmNodeDetails.getAddress(), scmNodeDetails.getRatisPort()); + + RaftPeer localRaftPeer = new RaftPeer(localRaftPeerId, ratisAddr); + + List raftPeers = new ArrayList<>(); + raftPeers.add(localRaftPeer); + + for (SCMNodeDetails peer : peers) { + String peerNodeId = peer.getSCMNodeId(); + InetSocketAddress peerRatisAddr = new InetSocketAddress( + peer.getAddress(), peer.getRatisPort()); + RaftPeerId raftPeerId = RaftPeerId.valueOf(peerNodeId); + RaftPeer raftPeer = new RaftPeer(raftPeerId, peerRatisAddr); + // Add other SCMs in Ratis ring + raftPeers.add(raftPeer); + } + + return new SCMRatisServer(conf, scm, scmServiceId, localRaftPeerId, + ratisAddr, raftPeers); + } + + private UUID getRaftGroupIdFromOmServiceId(String scmServiceId) { + return UUID.nameUUIDFromBytes(scmServiceId.getBytes( + StandardCharsets.UTF_8)); + } + + private SCMStateMachine getStateMachine() { + return new SCMStateMachine(this); + } + + private RaftProperties newRaftProperties(Configuration conf){ + final RaftProperties properties = new RaftProperties(); + // Set RPC type + final String rpcType = conf.get( + ScmConfigKeys.OZONE_SCM_RATIS_RPC_TYPE_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_RPC_TYPE_DEFAULT); + final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); + RaftConfigKeys.Rpc.setType(properties, rpc); + // Set the ratis port number + if (rpc == SupportedRpcType.GRPC) { + GrpcConfigKeys.Server.setPort(properties, port); + } else if (rpc == SupportedRpcType.NETTY) { + NettyConfigKeys.Server.setPort(properties, port); + } + // Set Ratis storage directory + String storageDir = SCMRatisServer.getSCMRatisDirectory(conf); + RaftServerConfigKeys.setStorageDirs(properties, + Collections.singletonList(new File(storageDir))); + // Set RAFT segment size + final int raftSegmentSize = (int) conf.getStorageSize( + ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_DEFAULT, + StorageUnit.BYTES); + RaftServerConfigKeys.Log.setSegmentSizeMax(properties, + SizeInBytes.valueOf(raftSegmentSize)); + // Set RAFT segment pre-allocated size + final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( + ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, + StorageUnit.BYTES); + int logAppenderQueueNumElements = conf.getInt( + ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, + ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); + final int logAppenderQueueByteLimit = (int) conf.getStorageSize( + ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, + ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, + StorageUnit.BYTES); + RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, + logAppenderQueueNumElements); + RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, + SizeInBytes.valueOf(logAppenderQueueByteLimit)); + RaftServerConfigKeys.Log.setPreallocatedSize(properties, + SizeInBytes.valueOf(raftSegmentPreallocatedSize)); + RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, + false); + final int logPurgeGap = conf.getInt( + ScmConfigKeys.OZONE_SCM_RATIS_LOG_PURGE_GAP, + ScmConfigKeys.OZONE_SCM_RATIS_LOG_PURGE_GAP_DEFAULT); + RaftServerConfigKeys.Log.setPurgeGap(properties, logPurgeGap); + // For grpc set the maximum message size + // TODO: calculate the optimal max message size + GrpcConfigKeys.setMessageSizeMax(properties, + SizeInBytes.valueOf(logAppenderQueueByteLimit)); + + // Set the server request timeout + TimeUnit serverRequestTimeoutUnit = + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit(); + long serverRequestTimeoutDuration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT + .getDuration(), serverRequestTimeoutUnit); + final TimeDuration serverRequestTimeout = TimeDuration.valueOf( + serverRequestTimeoutDuration, serverRequestTimeoutUnit); + RaftServerConfigKeys.Rpc.setRequestTimeout(properties, + serverRequestTimeout); + // Set timeout for server retry cache entry + TimeUnit retryCacheTimeoutUnit = ScmConfigKeys + .OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit(); + long retryCacheTimeoutDuration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT + .getDuration(), retryCacheTimeoutUnit); + final TimeDuration retryCacheTimeout = TimeDuration.valueOf( + retryCacheTimeoutDuration, retryCacheTimeoutUnit); + RaftServerConfigKeys.RetryCache.setExpiryTime(properties, + retryCacheTimeout); + // Set the server min and max timeout + TimeUnit serverMinTimeoutUnit = + ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit(); + long serverMinTimeoutDuration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT + .getDuration(), serverMinTimeoutUnit); + final TimeDuration serverMinTimeout = TimeDuration.valueOf( + serverMinTimeoutDuration, serverMinTimeoutUnit); + long serverMaxTimeoutDuration = + serverMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; + final TimeDuration serverMaxTimeout = TimeDuration.valueOf( + serverMaxTimeoutDuration, serverMinTimeoutUnit); + RaftServerConfigKeys.Rpc.setTimeoutMin(properties, + serverMinTimeout); + RaftServerConfigKeys.Rpc.setTimeoutMax(properties, + serverMaxTimeout); + // Set the number of maximum cached segments + RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2); + // TODO: set max write buffer size + // Set the ratis leader election timeout + TimeUnit leaderElectionMinTimeoutUnit = + ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + .getUnit(); + long leaderElectionMinTimeoutduration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT + .getDuration(), leaderElectionMinTimeoutUnit); + final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf( + leaderElectionMinTimeoutduration, leaderElectionMinTimeoutUnit); + RaftServerConfigKeys.Rpc.setTimeoutMin(properties, + leaderElectionMinTimeout); + long leaderElectionMaxTimeout = leaderElectionMinTimeout.toLong( + TimeUnit.MILLISECONDS) + 200; + RaftServerConfigKeys.Rpc.setTimeoutMax(properties, + TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS)); + TimeUnit nodeFailureTimeoutUnit = + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT + .getUnit(); + long nodeFailureTimeoutDuration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT + .getDuration(), nodeFailureTimeoutUnit); + final TimeDuration nodeFailureTimeout = TimeDuration.valueOf( + nodeFailureTimeoutDuration, nodeFailureTimeoutUnit); + RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, + nodeFailureTimeout); + RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, + nodeFailureTimeout); + + // Ratis leader role check + TimeUnit roleCheckIntervalUnit = + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT + .getUnit(); + long roleCheckIntervalDuration = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT + .getDuration(), nodeFailureTimeoutUnit); + this.roleCheckIntervalMs = TimeDuration.valueOf( + roleCheckIntervalDuration, roleCheckIntervalUnit) + .toLong(TimeUnit.MILLISECONDS); + this.roleCheckInitialDelayMs = leaderElectionMinTimeout + .toLong(TimeUnit.MILLISECONDS); + + return properties; + } + + /** + * Start the Ratis server. + * @throws IOException + */ + public void start() throws IOException { + LOG.info("Starting {} {} at port {}", getClass().getSimpleName(), + server.getId(), port); + server.start(); + } + + /** + * Stop the Ratis server. + */ + public void stop() { + try { + server.close(); + scmStateMachine.stop(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } + + private boolean checkCachedPeerRoleIsLeader() { + this.roleCheckLock.readLock().lock(); + try { + if (cachedPeerRole.isPresent() && + cachedPeerRole.get() == RaftProtos.RaftPeerRole.LEADER) { + return true; + } + return false; + } finally { + this.roleCheckLock.readLock().unlock(); + } + } + + public boolean isLeader() { + if (checkCachedPeerRoleIsLeader()) { + return true; + } + + // Get the server role from ratis server and update the cached values. + updateServerRole(); + + // After updating the server role, check and return if leader or not. + return checkCachedPeerRoleIsLeader(); + } + + @VisibleForTesting + public LifeCycle.State getServerState() { + return server.getLifeCycleState(); + } + + @VisibleForTesting + public RaftPeerId getRaftPeerId() { + return this.raftPeerId; + } + + public RaftGroup getRaftGroup() { + return this.raftGroup; + } + + /** + * Get the local directory where ratis logs will be stored. + */ + public static String getSCMRatisDirectory(Configuration conf) { + String storageDir = conf.get(ScmConfigKeys.OZONE_SCM_RATIS_STORAGE_DIR); + + if (Strings.isNullOrEmpty(storageDir)) { + storageDir = ServerUtils.getDefaultRatisDirectory(conf); + } + return storageDir; + } + + public Optional getCachedLeaderPeerId() { + this.roleCheckLock.readLock().lock(); + try { + return cachedLeaderPeerId; + } finally { + this.roleCheckLock.readLock().unlock(); + } + } + + public int getServerPort() { + return port; + } + + public void updateServerRole() { + try { + GroupInfoReply groupInfo = getGroupInfo(); + RaftProtos.RoleInfoProto roleInfoProto = groupInfo.getRoleInfoProto(); + RaftProtos.RaftPeerRole thisNodeRole = roleInfoProto.getRole(); + + if (thisNodeRole.equals(RaftProtos.RaftPeerRole.LEADER)) { + setServerRole(thisNodeRole, raftPeerId); + + } else if (thisNodeRole.equals(RaftProtos.RaftPeerRole.FOLLOWER)) { + ByteString leaderNodeId = roleInfoProto.getFollowerInfo() + .getLeaderInfo().getId().getId(); + // There may be a chance, here we get leaderNodeId as null. For + // example, in 3 node OM Ratis, if 2 SCM nodes are down, there will + // be no leader. + RaftPeerId leaderPeerId = null; + if (leaderNodeId != null && !leaderNodeId.isEmpty()) { + leaderPeerId = RaftPeerId.valueOf(leaderNodeId); + } + + setServerRole(thisNodeRole, leaderPeerId); + + } else { + setServerRole(thisNodeRole, null); + + } + } catch (IOException e) { + LOG.error("Failed to retrieve RaftPeerRole. Setting cached role to " + + "{} and resetting leader info.", + RaftProtos.RaftPeerRole.UNRECOGNIZED, e); + setServerRole(null, null); + } + } + + private GroupInfoReply getGroupInfo() throws IOException { + GroupInfoRequest groupInfoRequest = new GroupInfoRequest(clientId, + raftPeerId, raftGroupId, nextCallId()); + GroupInfoReply groupInfo = server.getGroupInfo(groupInfoRequest); + return groupInfo; + } + + private void setServerRole(RaftProtos.RaftPeerRole currentRole, + RaftPeerId leaderPeerId) { + this.roleCheckLock.writeLock().lock(); + try { + this.cachedPeerRole = Optional.ofNullable(currentRole); + this.cachedLeaderPeerId = Optional.ofNullable(leaderPeerId); + } finally { + this.roleCheckLock.writeLock().unlock(); + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java new file mode 100644 index 000000000000..502260a9c8f6 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ratis; + +import org.apache.ratis.statemachine.impl.BaseStateMachine; + +/** + * Class for SCM StateMachine. + */ +public class SCMStateMachine extends BaseStateMachine { + //TODO to be implemented + public SCMStateMachine(SCMRatisServer ratisServer) { + + } + + public void stop() { + return; + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 4b4e073e33e3..4f30e66f3a0b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -30,6 +30,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.Map; +import java.util.Collections; import java.util.Objects; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -42,6 +43,10 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.ratis.SCMRatisServer; +import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.BlockManager; @@ -90,7 +95,6 @@ import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl; import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.utils.HddsVersionInfo; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.IOUtils; @@ -188,6 +192,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private CertificateServer certificateServer; private GrpcTlsConfig grpcTlsConfig; + // SCM HA related + private SCMRatisServer scmRatisServer; + private JvmPauseMonitor jvmPauseMonitor; private final OzoneConfiguration configuration; private SCMContainerMetrics scmContainerMetrics; @@ -255,6 +262,12 @@ public StorageContainerManager(OzoneConfiguration conf, loginAsSCMUser(conf); } + if (SCMHAUtils.isSCMHAEnabled(conf)) { + initializeRatisServer(); + } else { + scmRatisServer = null; + } + // Creates the SCM DBs or opens them if it exists. // A valid pointer to the store is required by all the other services below. initalizeMetadataStore(conf, configurator); @@ -1109,4 +1122,18 @@ public SCMMetadataStore getScmMetadataStore() { public NetworkTopology getClusterMap() { return this.clusterMap; } + + private void initializeRatisServer() throws IOException { + if (scmRatisServer == null) { + SCMNodeDetails scmNodeDetails = SCMNodeDetails + .initStandAlone(configuration); + //TODO enable Ratis ring + scmRatisServer = SCMRatisServer.newSCMRatisServer(configuration, this, + scmNodeDetails, Collections.EMPTY_LIST); + if (scmRatisServer != null) { + LOG.info("SCM Ratis server initialized at port {}", + scmRatisServer.getServerPort()); + } + } + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java new file mode 100644 index 000000000000..f29fb5fed35b --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java @@ -0,0 +1,158 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ratis; + +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.util.LifeCycle; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; + +/** + * Test class for SCM Ratis Server. + */ +public class TestSCMRatisServer { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private OzoneConfiguration conf; + private SCMRatisServer scmRatisServer; + private StorageContainerManager scm; + private String scmId; + private SCMNodeDetails scmNodeDetails; + private static final long LEADER_ELECTION_TIMEOUT = 500L; + + @Before + public void init() throws Exception { + conf = new OzoneConfiguration(); + scmId = UUID.randomUUID().toString(); + conf.setTimeDuration( + ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); + int ratisPort = conf.getInt( + ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, + ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT); + InetSocketAddress rpcAddress = new InetSocketAddress( + InetAddress.getLocalHost(), 0); + scmNodeDetails = new SCMNodeDetails.Builder() + .setRatisPort(ratisPort) + .setRpcAddress(rpcAddress) + .setSCMNodeId(scmId) + .setSCMServiceId(OzoneConsts.SCM_SERVICE_ID_DEFAULT) + .build(); + + // Standalone SCM Ratis server + initSCM(); + scm = HddsTestUtils.getScm(conf); + scm.start(); + scmRatisServer = SCMRatisServer.newSCMRatisServer( + conf, scm, scmNodeDetails, Collections.EMPTY_LIST); + scmRatisServer.start(); + } + + @After + public void shutdown() { + if (scmRatisServer != null) { + scmRatisServer.stop(); + } + if (scm != null) { + scm.stop(); + } + } + + @Test + public void testStartSCMRatisServer() throws Exception { + Assert.assertEquals("Ratis Server should be in running state", + LifeCycle.State.RUNNING, scmRatisServer.getServerState()); + } + + @Test + public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws + Exception { + String customScmServiceId = "scmIdCustom123"; + OzoneConfiguration newConf = new OzoneConfiguration(); + String newOmId = UUID.randomUUID().toString(); + String path = GenericTestUtils.getTempPath(newOmId); + Path metaDirPath = Paths.get(path, "scm-meta"); + newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); + newConf.setTimeDuration( + ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, + LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); + int ratisPort = 9873; + InetSocketAddress rpcAddress = new InetSocketAddress( + InetAddress.getLocalHost(), 0); + SCMNodeDetails nodeDetails = new SCMNodeDetails.Builder() + .setRpcAddress(rpcAddress) + .setRatisPort(ratisPort) + .setSCMNodeId(newOmId) + .setSCMServiceId(customScmServiceId) + .build(); + // Starts a single node Ratis server + scmRatisServer.stop(); + SCMRatisServer newScmRatisServer = SCMRatisServer + .newSCMRatisServer(newConf, scm, nodeDetails, + Collections.emptyList()); + newScmRatisServer.start(); + + UUID uuid = UUID.nameUUIDFromBytes(customScmServiceId.getBytes()); + RaftGroupId raftGroupId = newScmRatisServer.getRaftGroup().getGroupId(); + Assert.assertEquals(uuid, raftGroupId.getUuid()); + Assert.assertEquals(raftGroupId.toByteString().size(), 16); + newScmRatisServer.stop(); + } + + private void initSCM() throws IOException { + String clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + + final String path = folder.newFolder().toString(); + Path scmPath = Paths.get(path, "scm-meta"); + Files.createDirectories(scmPath); + conf.set(OZONE_METADATA_DIRS, scmPath.toString()); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index b9ae815018d2..41f68b68de4e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -54,6 +54,8 @@ public void initializeMemberVariables() { errorIfMissingXmlProps = true; xmlPropsToSkipCompare.add("hadoop.tags.custom"); xmlPropsToSkipCompare.add("ozone.om.nodes.EXAMPLEOMSERVICEID"); + xmlPropsToSkipCompare.add("ozone.scm.nodes.EXAMPLESCMSERVICEID"); + xmlPrefixToSkipCompare.add("ipc.client.rpc-timeout.ms"); addPropertiesNotInXml(); } @@ -65,6 +67,7 @@ private void addPropertiesNotInXml() { HddsConfigKeys.HDDS_SECURITY_PROVIDER, HddsConfigKeys.HDDS_X509_CRL_NAME, // HDDS-2873 OMConfigKeys.OZONE_OM_NODES_KEY, + ScmConfigKeys.OZONE_SCM_NODES_KEY, OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE, OzoneConfigKeys.OZONE_S3_AUTHINFO_MAX_LIFETIME_KEY, ReconServerConfigKeys.OZONE_RECON_SCM_DB_DIR From 31c7386cb3de1735ee710822992b9dbf805668bd Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Fri, 17 Apr 2020 15:39:03 +0800 Subject: [PATCH 02/51] HDDS-3187 Construct SCM StateMachine. (#819) Co-authored-by: Li Cheng --- .../org/apache/hadoop/ozone/OzoneConsts.java | 5 + .../org/apache/hadoop/hdds/scm/ScmUtils.java | 12 + .../hdds/scm/container/ContainerManager.java | 5 + .../scm/container/SCMContainerManager.java | 21 +- .../apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 42 +++ .../hadoop/hdds/scm/ha/SCMNodeDetails.java | 18 +- .../hdds/scm/ratis/SCMStateMachine.java | 35 --- .../scm/server/StorageContainerManager.java | 59 ++++- .../{ => server}/ratis/SCMRatisServer.java | 25 +- .../server/ratis/SCMRatisSnapshotInfo.java | 179 +++++++++++++ .../scm/server/ratis/SCMStateMachine.java | 240 ++++++++++++++++++ .../scm/{ => server}/ratis/package-info.java | 2 +- .../ratis/TestSCMRatisServer.java | 23 +- .../scm/server/ratis/TestSCMStateMachine.java | 120 +++++++++ 14 files changed, 714 insertions(+), 72 deletions(-) delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java rename hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/{ => server}/ratis/SCMRatisServer.java (97%) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisSnapshotInfo.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java rename hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/{ => server}/ratis/package-info.java (94%) rename hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/{ => server}/ratis/TestSCMRatisServer.java (86%) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java index 8343dadeb5f9..7d7a870ce034 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java @@ -352,4 +352,9 @@ private OzoneConsts() { // SCM HA public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault"; + + // SCM Ratis snapshot file to store the last applied index + public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex"; + + public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm"; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java index 426341a32f40..bb48654e8d53 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ScmUtils.java @@ -25,6 +25,8 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.File; + /** * SCM utility class. */ @@ -48,4 +50,14 @@ public static void preCheck(ScmOps operation, Precheck... preChecks) } } + /** + * Create SCM directory file based on given path. + */ + public static File createSCMDir(String dirPath) { + File dirFile = new File(dirPath); + if (!dirFile.mkdirs() && !dirFile.exists()) { + throw new IllegalArgumentException("Unable to create path: " + dirFile); + } + return dirFile; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 43c1cedf6189..f17a2f4ba527 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -189,4 +189,9 @@ ContainerInfo getMatchingContainer(long size, String owner, * @param success */ void notifyContainerReportProcessing(boolean isFullReport, boolean success); + + /** + * Flush metadata of container manager if they are required to be persisted. + */ + void flushDB() throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 9f47608f30ee..ee8c68937aa0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -365,13 +365,20 @@ private HddsProtos.LifeCycleState updateContainerState( } } - /** - * Update deleteTransactionId according to deleteTransactionMap. - * - * @param deleteTransactionMap Maps the containerId to latest delete - * transaction id for the container. - * @throws IOException - */ + @Override + public void flushDB() throws IOException { + if (containerStore != null) { + containerStore.flushDB(true); + } + } + + /** + * Update deleteTransactionId according to deleteTransactionMap. + * + * @param deleteTransactionMap Maps the containerId to latest delete + * transaction id for the container. + * @throws IOException + */ public void updateDeleteTransactionId(Map deleteTransactionMap) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index c0364adb1b16..eb22566f51d2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -18,8 +18,15 @@ package org.apache.hadoop.hdds.scm.ha; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ScmUtils; +import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; + +import java.io.File; +import java.util.Collection; /** * Utility class used by SCM HA. @@ -34,4 +41,39 @@ public static boolean isSCMHAEnabled(OzoneConfiguration conf) { return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); } + + public static File createSCMRatisDir(OzoneConfiguration conf) + throws IllegalArgumentException { + String scmRatisDir = SCMRatisServer.getSCMRatisDirectory(conf); + if (scmRatisDir == null || scmRatisDir.isEmpty()) { + throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + + " must be defined."); + } + return ScmUtils.createSCMDir(scmRatisDir); + } + + /** + * Get a collection of all scmNodeIds for the given scmServiceId. + */ + public static Collection getSCMNodeIds(Configuration conf, + String scmServiceId) { + String key = addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId); + return conf.getTrimmedStringCollection(key); + } + + public static String getLocalSCMNodeId(String scmServiceId) { + return addSuffix(ScmConfigKeys.OZONE_SCM_NODES_KEY, scmServiceId); + } + + /** + * Add non empty and non null suffix to a key. + */ + private static String addSuffix(String key, String suffix) { + if (suffix == null || suffix.isEmpty()) { + return key; + } + assert !suffix.startsWith(".") : + "suffix '" + suffix + "' should not already have '.' prepended."; + return key + "." + suffix; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java index 8d66187ee748..2390cb3a87cd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMNodeDetails.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.ozone.OzoneConsts; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -29,6 +28,7 @@ import java.net.InetSocketAddress; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; /** * Construct SCM node details. @@ -153,6 +153,18 @@ public String getRpcAddressString() { public static SCMNodeDetails initStandAlone( OzoneConfiguration conf) throws IOException { String localSCMServiceId = conf.getTrimmed(OZONE_SCM_INTERNAL_SERVICE_ID); + if (localSCMServiceId == null) { + // There is no internal om service id is being set, fall back to ozone + // .om.service.ids. + LOG.info("{} is not defined, falling back to {} to find serviceID for " + + "SCM if it is HA enabled cluster", + OZONE_SCM_INTERNAL_SERVICE_ID, OZONE_SCM_SERVICE_IDS_KEY); + localSCMServiceId = conf.getTrimmed( + OZONE_SCM_SERVICE_IDS_KEY); + } else { + LOG.info("ServiceID for SCM is {}", localSCMServiceId); + } + String localSCMNodeId = SCMHAUtils.getLocalSCMNodeId(localSCMServiceId); int ratisPort = conf.getInt( ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT); @@ -161,8 +173,8 @@ public static SCMNodeDetails initStandAlone( SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder() .setRatisPort(ratisPort) .setRpcAddress(rpcAddress) - .setSCMNodeId(localSCMServiceId) - .setSCMServiceId(OzoneConsts.SCM_SERVICE_ID_DEFAULT) + .setSCMNodeId(localSCMNodeId) + .setSCMServiceId(localSCMServiceId) .build(); return scmNodeDetails; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java deleted file mode 100644 index 502260a9c8f6..000000000000 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMStateMachine.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.ratis; - -import org.apache.ratis.statemachine.impl.BaseStateMachine; - -/** - * Class for SCM StateMachine. - */ -public class SCMStateMachine extends BaseStateMachine { - //TODO to be implemented - public SCMStateMachine(SCMRatisServer ratisServer) { - - } - - public void stop() { - return; - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 4f30e66f3a0b..8ef5813871c6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -25,6 +25,13 @@ import java.io.IOException; import java.net.InetAddress; import java.net.InetSocketAddress; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.cache.Cache; +import com.google.common.cache.CacheBuilder; +import com.google.common.cache.RemovalListener; +import com.google.protobuf.BlockingService; + +import java.io.File; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.Collection; @@ -45,7 +52,8 @@ import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; -import org.apache.hadoop.hdds.scm.ratis.SCMRatisServer; +import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; +import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisSnapshotInfo; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -113,13 +121,9 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.JvmPauseMonitor; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.protobuf.BlockingService; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; import org.apache.ratis.grpc.GrpcTlsConfig; +import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -194,6 +198,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl // SCM HA related private SCMRatisServer scmRatisServer; + private SCMRatisSnapshotInfo scmRatisSnapshotInfo; + private File scmRatisSnapshotDir; private JvmPauseMonitor jvmPauseMonitor; private final OzoneConfiguration configuration; @@ -263,6 +269,9 @@ public StorageContainerManager(OzoneConfiguration conf, } if (SCMHAUtils.isSCMHAEnabled(conf)) { + this.scmRatisSnapshotInfo = new SCMRatisSnapshotInfo( + scmStorageConfig.getCurrentDir()); + this.scmRatisSnapshotDir = SCMHAUtils.createSCMRatisDir(conf); initializeRatisServer(); } else { scmRatisServer = null; @@ -795,6 +804,10 @@ public void start() throws IOException { getClientRpcAddress())); } + if (scmRatisServer != null) { + scmRatisServer.start(); + } + ms = HddsServerUtil .initializeMetrics(configuration, "StorageContainerManager"); @@ -1136,4 +1149,38 @@ private void initializeRatisServer() throws IOException { } } } + + @VisibleForTesting + public SCMRatisServer getScmRatisServer() { + return scmRatisServer; + } + + @VisibleForTesting + public SCMRatisSnapshotInfo getSnapshotInfo() { + return scmRatisSnapshotInfo; + } + + @VisibleForTesting + public long getRatisSnapshotIndex() { + return scmRatisSnapshotInfo.getIndex(); + } + + /** + * Save ratis snapshot to SCM meta store and local disk. + */ + public TermIndex saveRatisSnapshot() throws IOException { + TermIndex snapshotIndex = scmRatisServer.getLastAppliedTermIndex(); + if (scmMetadataStore != null) { + // Flush the SCM state to disk + scmMetadataStore.getStore().flush(); + } + + if (containerManager != null) { + containerManager.flushDB(); + } + + scmRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex); + + return snapshotIndex; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java similarity index 97% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java index af1e5c2c30ba..77dee6a0f9e6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.scm.ratis; +package org.apache.hadoop.hdds.scm.server.ratis; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; @@ -42,6 +42,7 @@ import org.apache.ratis.rpc.SupportedRpcType; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.server.protocol.TermIndex; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.util.LifeCycle; import org.apache.ratis.util.SizeInBytes; @@ -94,6 +95,10 @@ private static long nextCallId() { return CALL_ID_COUNTER.getAndIncrement() & Long.MAX_VALUE; } + /** + * Creates a SCM Ratis Server. + * @throws IOException + */ private SCMRatisServer(Configuration conf, StorageContainerManager scm, String raftGroupIdStr, RaftPeerId localRaftPeerId, @@ -139,6 +144,9 @@ public void run() { }, roleCheckInitialDelayMs, roleCheckIntervalMs, TimeUnit.MILLISECONDS); } + /** + * Create a SCM Ratis Server instance. + */ public static SCMRatisServer newSCMRatisServer( Configuration conf, StorageContainerManager scm, SCMNodeDetails scmNodeDetails, List peers) @@ -178,7 +186,7 @@ private SCMStateMachine getStateMachine() { return new SCMStateMachine(this); } - private RaftProperties newRaftProperties(Configuration conf){ + private RaftProperties newRaftProperties(Configuration conf) { final RaftProperties properties = new RaftProperties(); // Set RPC type final String rpcType = conf.get( @@ -403,6 +411,15 @@ public Optional getCachedLeaderPeerId() { } } + public StorageContainerManager getSCM() { + return scm; + } + + @VisibleForTesting + public SCMStateMachine getScmStateMachine() { + return scmStateMachine; + } + public int getServerPort() { return port; } @@ -441,6 +458,10 @@ public void updateServerRole() { } } + public TermIndex getLastAppliedTermIndex() { + return scmStateMachine.getLastAppliedTermIndex(); + } + private GroupInfoReply getGroupInfo() throws IOException { GroupInfoRequest groupInfoRequest = new GroupInfoRequest(clientId, raftPeerId, raftGroupId, nextCallId()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisSnapshotInfo.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisSnapshotInfo.java new file mode 100644 index 000000000000..11b3234a9838 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisSnapshotInfo.java @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.server.ratis; + +import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.server.storage.FileInfo; +import org.apache.ratis.statemachine.SnapshotInfo; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.yaml.snakeyaml.DumperOptions; +import org.yaml.snakeyaml.Yaml; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStreamWriter; +import java.io.Writer; +import java.util.List; + +import static org.apache.hadoop.ozone.OzoneConsts.SCM_RATIS_SNAPSHOT_INDEX; + +/** + * This class captures the snapshotIndex and term of the latest snapshot in + * the SCM. + * Ratis server loads the snapshotInfo during startup and updates the + * lastApplied index to this snapshotIndex. OM SnapshotInfo does not contain + * any files. It is used only to store/ update the last applied index and term. + */ +public class SCMRatisSnapshotInfo implements SnapshotInfo { + + static final Logger LOG = LoggerFactory.getLogger(SCMRatisSnapshotInfo.class); + + private volatile long term = 0; + private volatile long snapshotIndex = -1; + + private final File ratisSnapshotFile; + + public SCMRatisSnapshotInfo(File ratisDir) throws IOException { + ratisSnapshotFile = new File(ratisDir, SCM_RATIS_SNAPSHOT_INDEX); + loadRatisSnapshotIndex(); + } + + public void updateTerm(long newTerm) { + term = newTerm; + } + + private void updateTermIndex(long newTerm, long newIndex) { + this.term = newTerm; + this.snapshotIndex = newIndex; + } + + /** + * Load the snapshot index and term from the snapshot file on disk, + * if it exists. + * @throws IOException + */ + private void loadRatisSnapshotIndex() throws IOException { + if (ratisSnapshotFile.exists()) { + RatisSnapshotYaml ratisSnapshotYaml = readRatisSnapshotYaml(); + updateTermIndex(ratisSnapshotYaml.term, ratisSnapshotYaml.snapshotIndex); + } + } + + /** + * Read and parse the snapshot yaml file. + */ + private RatisSnapshotYaml readRatisSnapshotYaml() throws IOException { + try (FileInputStream inputFileStream = new FileInputStream( + ratisSnapshotFile)) { + Yaml yaml = new Yaml(); + try { + return yaml.loadAs(inputFileStream, RatisSnapshotYaml.class); + } catch (Exception e) { + throw new IOException("Unable to parse RatisSnapshot yaml file.", e); + } + } + } + + /** + * Update and persist the snapshot index and term to disk. + * @param lastAppliedTermIndex new snapshot index to be persisted to disk. + * @throws IOException + */ + public void saveRatisSnapshotToDisk(TermIndex lastAppliedTermIndex) + throws IOException { + updateTermIndex(lastAppliedTermIndex.getTerm(), + lastAppliedTermIndex.getIndex()); + writeRatisSnapshotYaml(); + LOG.info("Saved Ratis Snapshot on the SCM with snapshotIndex {}", + lastAppliedTermIndex); + } + + /** + * Write snapshot details to disk in yaml format. + */ + private void writeRatisSnapshotYaml() throws IOException { + DumperOptions options = new DumperOptions(); + options.setPrettyFlow(true); + options.setDefaultFlowStyle(DumperOptions.FlowStyle.FLOW); + Yaml yaml = new Yaml(options); + + RatisSnapshotYaml ratisSnapshotYaml = new RatisSnapshotYaml(term, + snapshotIndex); + + try (Writer writer = new OutputStreamWriter( + new FileOutputStream(ratisSnapshotFile), "UTF-8")) { + yaml.dump(ratisSnapshotYaml, writer); + } + } + + @Override + public TermIndex getTermIndex() { + return TermIndex.newTermIndex(term, snapshotIndex); + } + + @Override + public long getTerm() { + return term; + } + + @Override + public long getIndex() { + return snapshotIndex; + } + + @Override + public List getFiles() { + return null; + } + + /** + * Ratis Snapshot details to be written to the yaml file. + */ + public static class RatisSnapshotYaml { + private long term; + private long snapshotIndex; + + public RatisSnapshotYaml() { + // Needed for snake-yaml introspection. + } + + RatisSnapshotYaml(long term, long snapshotIndex) { + this.term = term; + this.snapshotIndex = snapshotIndex; + } + + public void setTerm(long term) { + this.term = term; + } + + public long getTerm() { + return this.term; + } + + public void setSnapshotIndex(long index) { + this.snapshotIndex = index; + } + + public long getSnapshotIndex() { + return this.snapshotIndex; + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java new file mode 100644 index 000000000000..b60570b9e7e8 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java @@ -0,0 +1,240 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.server.ratis; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.util.concurrent.HadoopExecutors; +import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.server.RaftServer; +import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.server.storage.RaftStorage; +import org.apache.ratis.statemachine.SnapshotInfo; +import org.apache.ratis.statemachine.TransactionContext; +import org.apache.ratis.statemachine.impl.BaseStateMachine; +import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage; +import org.apache.ratis.util.LifeCycle; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ConcurrentSkipListMap; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; + +/** + * Class for SCM StateMachine. + */ +public class SCMStateMachine extends BaseStateMachine { + static final Logger LOG = + LoggerFactory.getLogger(SCMStateMachine.class); + private final SimpleStateMachineStorage storage = + new SimpleStateMachineStorage(); + private final SCMRatisServer scmRatisServer; + private final StorageContainerManager scm; + private RaftGroupId raftGroupId; + private final SCMRatisSnapshotInfo snapshotInfo; + private final ExecutorService executorService; + private final ExecutorService installSnapshotExecutor; + + // Map which contains index and term for the ratis transactions which are + // stateMachine entries which are recived through applyTransaction. + private ConcurrentMap applyTransactionMap = + new ConcurrentSkipListMap<>(); + + /** + * Create a SCM state machine. + */ + public SCMStateMachine(SCMRatisServer ratisServer) { + this.scmRatisServer = ratisServer; + this.scm = ratisServer.getSCM(); + + this.snapshotInfo = scm.getSnapshotInfo(); + updateLastAppliedIndexWithSnaphsotIndex(); + + ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) + .setNameFormat("SCM StateMachine ApplyTransaction Thread - %d").build(); + this.executorService = HadoopExecutors.newSingleThreadExecutor(build); + this.installSnapshotExecutor = HadoopExecutors.newSingleThreadExecutor(); + } + + /** + * Initializes the State Machine with the given server, group and storage. + */ + @Override + public void initialize(RaftServer server, RaftGroupId id, + RaftStorage raftStorage) throws IOException { + lifeCycle.startAndTransition(() -> { + super.initialize(server, id, raftStorage); + this.raftGroupId = id; + storage.init(raftStorage); + }); + } + + /** + * Pre-execute the update request into state machine. + */ + @Override + public TransactionContext startTransaction( + RaftClientRequest raftClientRequest) { + return TransactionContext.newBuilder() + .setClientRequest(raftClientRequest) + .setStateMachine(this) + .setServerRole(RaftProtos.RaftPeerRole.LEADER) + .setLogData(raftClientRequest.getMessage().getContent()) + .build(); + } + + /** + * Apply a committed log entry to state machine. + */ + @Override + public CompletableFuture applyTransaction(TransactionContext trx) { + CompletableFuture ratisFuture = + new CompletableFuture<>(); + //TODO execute SCMRequest and process SCMResponse + return ratisFuture; + } + + /** + * Query state machine. + */ + @Override + public CompletableFuture query(Message request) { + //TODO make handler respond to the query request. + return CompletableFuture.completedFuture(request); + } + + /** + * Pause state machine. + */ + @Override + public void pause() { + lifeCycle.transition(LifeCycle.State.PAUSING); + lifeCycle.transition(LifeCycle.State.PAUSED); + } + + /** + * Unpause state machine and update the lastAppliedIndex. + * Following after uploading new state to state machine. + */ + public void unpause(long newLastAppliedSnaphsotIndex, + long newLastAppliedSnapShotTermIndex) { + lifeCycle.startAndTransition(() -> { + this.setLastAppliedTermIndex(TermIndex.newTermIndex( + newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); + }); + } + + /** + * Take SCM snapshot and write index to file. + * @return actual index or 0 if error. + */ + @Override + public long takeSnapshot() throws IOException { + LOG.info("Saving Ratis snapshot on the SCM."); + if (scm != null) { + return scm.saveRatisSnapshot().getIndex(); + } + return 0; + } + + /** + * Get latest SCM snapshot. + */ + @Override + public SnapshotInfo getLatestSnapshot() { + return snapshotInfo; + } + + private synchronized void updateLastApplied() { + Long appliedTerm = null; + long appliedIndex = -1; + for(long i = getLastAppliedTermIndex().getIndex() + 1;; i++) { + final Long removed = applyTransactionMap.remove(i); + if (removed == null) { + break; + } + appliedTerm = removed; + appliedIndex = i; + } + if (appliedTerm != null) { + updateLastAppliedTermIndex(appliedTerm, appliedIndex); + } + } + + /** + * Called to notify state machine about indexes which are processed + * internally by Raft Server, this currently happens when conf entries are + * processed in raft Server. This keep state machine to keep a track of index + * updates. + */ + @Override + public void notifyIndexUpdate(long currentTerm, long index) { + applyTransactionMap.put(index, currentTerm); + updateLastApplied(); + snapshotInfo.updateTerm(currentTerm); + } + + /** + * Notifies the state machine that the raft peer is no longer leader. + */ + @Override + public void notifyNotLeader(Collection pendingEntries) { + scmRatisServer.updateServerRole(); + } + + /** + * Transfer from log entry to string. + */ + @Override + public String toStateMachineLogEntryString( + RaftProtos.StateMachineLogEntryProto proto) { + //TODO implement transfer from proto to SCMRequest body. + return null; + } + + /** + * Update lastAppliedIndex term in snapshot info. + */ + public void updateLastAppliedIndexWithSnaphsotIndex() { + setLastAppliedTermIndex(TermIndex.newTermIndex(snapshotInfo.getTerm(), + snapshotInfo.getIndex())); + LOG.info("LastAppliedIndex set from SnapShotInfo {}", + getLastAppliedTermIndex()); + } + + @VisibleForTesting + void addApplyTransactionTermIndex(long term, long index) { + applyTransactionMap.put(index, term); + } + + public void stop() { + HadoopExecutors.shutdown(executorService, LOG, 5, TimeUnit.SECONDS); + HadoopExecutors.shutdown(installSnapshotExecutor, LOG, 5, TimeUnit.SECONDS); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/package-info.java similarity index 94% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/package-info.java index 494401759320..77f4afa830c5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ratis/package-info.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/package-info.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdds.scm.ratis; +package org.apache.hadoop.hdds.scm.server.ratis; /** * This package contains classes related to Apache Ratis for SCM. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java similarity index 86% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java index f29fb5fed35b..d6981d3d8cc5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ratis/TestSCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.hdds.scm.ratis; +package org.apache.hadoop.hdds.scm.server.ratis; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.test.GenericTestUtils; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.util.LifeCycle; @@ -59,7 +58,6 @@ public class TestSCMRatisServer { private SCMRatisServer scmRatisServer; private StorageContainerManager scm; private String scmId; - private SCMNodeDetails scmNodeDetails; private static final long LEADER_ELECTION_TIMEOUT = 500L; @Before @@ -69,25 +67,14 @@ public void init() throws Exception { conf.setTimeDuration( ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - int ratisPort = conf.getInt( - ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_PORT_DEFAULT); - InetSocketAddress rpcAddress = new InetSocketAddress( - InetAddress.getLocalHost(), 0); - scmNodeDetails = new SCMNodeDetails.Builder() - .setRatisPort(ratisPort) - .setRpcAddress(rpcAddress) - .setSCMNodeId(scmId) - .setSCMServiceId(OzoneConsts.SCM_SERVICE_ID_DEFAULT) - .build(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test"); // Standalone SCM Ratis server initSCM(); scm = HddsTestUtils.getScm(conf); scm.start(); - scmRatisServer = SCMRatisServer.newSCMRatisServer( - conf, scm, scmNodeDetails, Collections.EMPTY_LIST); - scmRatisServer.start(); + scmRatisServer = scm.getScmRatisServer(); } @After @@ -101,7 +88,7 @@ public void shutdown() { } @Test - public void testStartSCMRatisServer() throws Exception { + public void testStartSCMRatisServer() { Assert.assertEquals("Ratis Server should be in running state", LifeCycle.State.RUNNING, scmRatisServer.getServerState()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java new file mode 100644 index 000000000000..69bc5bd93b68 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java @@ -0,0 +1,120 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.server.ratis; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.HddsTestUtils; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.UUID; + +import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; + +/** + * Test class for SCMStateMachine. + */ +public class TestSCMStateMachine { + @Rule + public TemporaryFolder folder = new TemporaryFolder(); + + private SCMStateMachine scmStateMachine; + private StorageContainerManager scm; + private SCMRatisServer scmRatisServer; + private OzoneConfiguration conf; + private String scmId; + @Before + public void init() throws Exception { + conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test"); + scmId = UUID.randomUUID().toString(); + + initSCM(); + scm = HddsTestUtils.getScm(conf); + scm.start(); + scmRatisServer = scm.getScmRatisServer(); + scmStateMachine = scm.getScmRatisServer().getScmStateMachine(); + } + + @Test + public void testSCMUpdatedAppliedIndex(){ + // State machine should start with 0 term and 0 index. + scmStateMachine.notifyIndexUpdate(0, 0); + Assert.assertEquals(0, + scmStateMachine.getLastAppliedTermIndex().getTerm()); + Assert.assertEquals(0, + scmStateMachine.getLastAppliedTermIndex().getIndex()); + + // If only the transactionMap is updated, index should stay 0. + scmStateMachine.addApplyTransactionTermIndex(0, 1); + Assert.assertEquals(0L, + scmStateMachine.getLastAppliedTermIndex().getTerm()); + Assert.assertEquals(0L, + scmStateMachine.getLastAppliedTermIndex().getIndex()); + + // After the index update is notified, the index should increase. + scmStateMachine.notifyIndexUpdate(0, 1); + Assert.assertEquals(0L, + scmStateMachine.getLastAppliedTermIndex().getTerm()); + Assert.assertEquals(1L, + scmStateMachine.getLastAppliedTermIndex().getIndex()); + + // Only do a notifyIndexUpdate can also increase the index. + scmStateMachine.notifyIndexUpdate(0, 2); + Assert.assertEquals(0L, + scmStateMachine.getLastAppliedTermIndex().getTerm()); + Assert.assertEquals(2L, + scmStateMachine.getLastAppliedTermIndex().getIndex()); + + // If a larger index is notified, the index should not be updated. + scmStateMachine.notifyIndexUpdate(0, 5); + Assert.assertEquals(0L, + scmStateMachine.getLastAppliedTermIndex().getTerm()); + Assert.assertEquals(2L, + scmStateMachine.getLastAppliedTermIndex().getIndex()); + } + + private void initSCM() throws IOException { + String clusterId = UUID.randomUUID().toString(); + final String path = folder.newFolder().toString(); + Path scmPath = Paths.get(path, "scm-meta"); + Files.createDirectories(scmPath); + conf.set(OZONE_METADATA_DIRS, scmPath.toString()); + SCMStorageConfig scmStore = new SCMStorageConfig(conf); + scmStore.setClusterId(clusterId); + scmStore.setScmId(scmId); + // writes the version file properties + scmStore.initialize(); + } + + @After + public void cleanup() { + scm.stop(); + } +} From 8f2107a641664c6b71928916cc5648f364bf926e Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Thu, 30 Apr 2020 16:26:03 +0800 Subject: [PATCH 03/51] Resolve conflicts with merge from master. --- .../hdds/scm/container/ContainerManager.java | 5 --- .../scm/container/SCMContainerManager.java | 7 --- .../scm/server/StorageContainerManager.java | 4 -- .../hdds/scm/server/ratis/SCMRatisServer.java | 44 +++++++++---------- .../scm/server/ratis/SCMStateMachine.java | 8 ++-- 5 files changed, 26 insertions(+), 42 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index f17a2f4ba527..43c1cedf6189 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -189,9 +189,4 @@ ContainerInfo getMatchingContainer(long size, String owner, * @param success */ void notifyContainerReportProcessing(boolean isFullReport, boolean success); - - /** - * Flush metadata of container manager if they are required to be persisted. - */ - void flushDB() throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index ee8c68937aa0..50b28291e1a8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -365,13 +365,6 @@ private HddsProtos.LifeCycleState updateContainerState( } } - @Override - public void flushDB() throws IOException { - if (containerStore != null) { - containerStore.flushDB(true); - } - } - /** * Update deleteTransactionId according to deleteTransactionMap. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 8ef5813871c6..193e09f324c4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1175,10 +1175,6 @@ public TermIndex saveRatisSnapshot() throws IOException { scmMetadataStore.getStore().flush(); } - if (containerManager != null) { - containerManager.flushDB(); - } - scmRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex); return snapshotIndex; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java index 77dee6a0f9e6..89a9d557f2df 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java @@ -20,8 +20,8 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -30,7 +30,8 @@ import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.grpc.GrpcConfigKeys; import org.apache.ratis.netty.NettyConfigKeys; -import org.apache.ratis.proto.RaftProtos; +import org.apache.ratis.proto.RaftProtos.RaftPeerRole; +import org.apache.ratis.proto.RaftProtos.RoleInfoProto; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.GroupInfoReply; import org.apache.ratis.protocol.GroupInfoRequest; @@ -87,7 +88,7 @@ public final class SCMRatisServer { private long roleCheckInitialDelayMs = 1000; // 1 second default private long roleCheckIntervalMs; private ReentrantReadWriteLock roleCheckLock = new ReentrantReadWriteLock(); - private Optional cachedPeerRole = Optional.empty(); + private Optional cachedPeerRole = Optional.empty(); private Optional cachedLeaderPeerId = Optional.empty(); private static final AtomicLong CALL_ID_COUNTER = new AtomicLong(); @@ -99,7 +100,7 @@ private static long nextCallId() { * Creates a SCM Ratis Server. * @throws IOException */ - private SCMRatisServer(Configuration conf, + private SCMRatisServer(ConfigurationSource conf, StorageContainerManager scm, String raftGroupIdStr, RaftPeerId localRaftPeerId, InetSocketAddress addr, List raftPeers) @@ -137,7 +138,7 @@ private SCMRatisServer(Configuration conf, public void run() { // Run this check only on the leader OM if (cachedPeerRole.isPresent() && - cachedPeerRole.get() == RaftProtos.RaftPeerRole.LEADER) { + cachedPeerRole.get() == RaftPeerRole.LEADER) { updateServerRole(); } } @@ -148,7 +149,7 @@ public void run() { * Create a SCM Ratis Server instance. */ public static SCMRatisServer newSCMRatisServer( - Configuration conf, StorageContainerManager scm, + ConfigurationSource conf, StorageContainerManager scm, SCMNodeDetails scmNodeDetails, List peers) throws IOException { String scmServiceId = scmNodeDetails.getSCMServiceId(); @@ -186,7 +187,7 @@ private SCMStateMachine getStateMachine() { return new SCMStateMachine(this); } - private RaftProperties newRaftProperties(Configuration conf) { + private RaftProperties newRaftProperties(ConfigurationSource conf) { final RaftProperties properties = new RaftProperties(); // Set RPC type final String rpcType = conf.get( @@ -202,20 +203,20 @@ private RaftProperties newRaftProperties(Configuration conf) { } // Set Ratis storage directory String storageDir = SCMRatisServer.getSCMRatisDirectory(conf); - RaftServerConfigKeys.setStorageDirs(properties, + RaftServerConfigKeys.setStorageDir(properties, Collections.singletonList(new File(storageDir))); // Set RAFT segment size final int raftSegmentSize = (int) conf.getStorageSize( ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_KEY, ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_DEFAULT, - StorageUnit.BYTES); + org.apache.hadoop.hdds.conf.StorageUnit.BYTES); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, SizeInBytes.valueOf(raftSegmentSize)); // Set RAFT segment pre-allocated size final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - StorageUnit.BYTES); + org.apache.hadoop.hdds.conf.StorageUnit.BYTES); int logAppenderQueueNumElements = conf.getInt( ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); @@ -280,7 +281,7 @@ private RaftProperties newRaftProperties(Configuration conf) { RaftServerConfigKeys.Rpc.setTimeoutMax(properties, serverMaxTimeout); // Set the number of maximum cached segments - RaftServerConfigKeys.Log.setMaxCachedSegmentNum(properties, 2); + RaftServerConfigKeys.Log.setSegmentCacheNumMax(properties, 2); // TODO: set max write buffer size // Set the ratis leader election timeout TimeUnit leaderElectionMinTimeoutUnit = @@ -355,7 +356,7 @@ private boolean checkCachedPeerRoleIsLeader() { this.roleCheckLock.readLock().lock(); try { if (cachedPeerRole.isPresent() && - cachedPeerRole.get() == RaftProtos.RaftPeerRole.LEADER) { + cachedPeerRole.get() ==RaftPeerRole.LEADER) { return true; } return false; @@ -393,7 +394,7 @@ public RaftGroup getRaftGroup() { /** * Get the local directory where ratis logs will be stored. */ - public static String getSCMRatisDirectory(Configuration conf) { + public static String getSCMRatisDirectory(ConfigurationSource conf) { String storageDir = conf.get(ScmConfigKeys.OZONE_SCM_RATIS_STORAGE_DIR); if (Strings.isNullOrEmpty(storageDir)) { @@ -427,17 +428,17 @@ public int getServerPort() { public void updateServerRole() { try { GroupInfoReply groupInfo = getGroupInfo(); - RaftProtos.RoleInfoProto roleInfoProto = groupInfo.getRoleInfoProto(); - RaftProtos.RaftPeerRole thisNodeRole = roleInfoProto.getRole(); + RoleInfoProto roleInfoProto = groupInfo.getRoleInfoProto(); + RaftPeerRole thisNodeRole = roleInfoProto.getRole(); - if (thisNodeRole.equals(RaftProtos.RaftPeerRole.LEADER)) { + if (thisNodeRole.equals(RaftPeerRole.LEADER)) { setServerRole(thisNodeRole, raftPeerId); - } else if (thisNodeRole.equals(RaftProtos.RaftPeerRole.FOLLOWER)) { + } else if (thisNodeRole.equals(RaftPeerRole.FOLLOWER)) { ByteString leaderNodeId = roleInfoProto.getFollowerInfo() .getLeaderInfo().getId().getId(); // There may be a chance, here we get leaderNodeId as null. For - // example, in 3 node OM Ratis, if 2 SCM nodes are down, there will + // example, in 3 node OM Ratis, if 2 OM nodes are down, there will // be no leader. RaftPeerId leaderPeerId = null; if (leaderNodeId != null && !leaderNodeId.isEmpty()) { @@ -452,8 +453,7 @@ public void updateServerRole() { } } catch (IOException e) { LOG.error("Failed to retrieve RaftPeerRole. Setting cached role to " + - "{} and resetting leader info.", - RaftProtos.RaftPeerRole.UNRECOGNIZED, e); + "{} and resetting leader info.", RaftPeerRole.UNRECOGNIZED, e); setServerRole(null, null); } } @@ -469,7 +469,7 @@ private GroupInfoReply getGroupInfo() throws IOException { return groupInfo; } - private void setServerRole(RaftProtos.RaftPeerRole currentRole, + private void setServerRole(RaftPeerRole currentRole, RaftPeerId leaderPeerId) { this.roleCheckLock.writeLock().lock(); try { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java index b60570b9e7e8..144380a620c1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java @@ -88,7 +88,7 @@ public SCMStateMachine(SCMRatisServer ratisServer) { @Override public void initialize(RaftServer server, RaftGroupId id, RaftStorage raftStorage) throws IOException { - lifeCycle.startAndTransition(() -> { + getLifeCycle().startAndTransition(() -> { super.initialize(server, id, raftStorage); this.raftGroupId = id; storage.init(raftStorage); @@ -134,8 +134,8 @@ public CompletableFuture query(Message request) { */ @Override public void pause() { - lifeCycle.transition(LifeCycle.State.PAUSING); - lifeCycle.transition(LifeCycle.State.PAUSED); + getLifeCycle().transition(LifeCycle.State.PAUSING); + getLifeCycle().transition(LifeCycle.State.PAUSED); } /** @@ -144,7 +144,7 @@ public void pause() { */ public void unpause(long newLastAppliedSnaphsotIndex, long newLastAppliedSnapShotTermIndex) { - lifeCycle.startAndTransition(() -> { + getLifeCycle().startAndTransition(() -> { this.setLastAppliedTermIndex(TermIndex.newTermIndex( newLastAppliedSnapShotTermIndex, newLastAppliedSnaphsotIndex)); }); From aa2884c9ae2fa6acb8404dcc36a2a6ded9e4c8ec Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Wed, 20 May 2020 10:05:57 +0800 Subject: [PATCH 04/51] HDDS-3556 Refactor conf in SCMRatisServer to Java-based conf. (#907) --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 2 +- .../src/main/resources/ozone-default.xml | 4 +- .../org/apache/hadoop/hdds/conf/Config.java | 5 + .../apache/hadoop/hdds/conf/ConfigTag.java | 3 +- .../conf/ConfigurationReflectionUtil.java | 4 + .../scm/server/StorageContainerManager.java | 89 +++--- .../hdds/scm/server/ratis/SCMRatisServer.java | 282 ++++++++++++------ .../org/apache/hadoop/hdds/scm/TestUtils.java | 2 +- .../scm/server/ratis/TestSCMRatisServer.java | 6 +- .../hadoop/ozone/genesis/GenesisUtil.java | 2 +- 10 files changed, 270 insertions(+), 129 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 37a1833e5809..3e028f773d28 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -427,7 +427,7 @@ public final class ScmConfigKeys { // SCM Ratis Leader Election configurations public static final String OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY = - "ozone.scm.leader.election.minimum.timeout.duration"; + "ozone.scm.ratis.leader.election.minimum.timeout.duration"; public static final TimeDuration OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT = TimeDuration.valueOf(1, TimeUnit.SECONDS); diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index cd350989be85..337109b0b5a9 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2041,7 +2041,7 @@ ozone.scm.ratis.server.request.timeout 3s OZONE, SCM, HA, RATIS - The timeout duration for SCM's ratis server request . + The timeout duration for SCM's ratis server request. @@ -2060,7 +2060,7 @@ - ozone.scm.leader.election.minimum.timeout.duration + ozone.scm.ratis.leader.election.minimum.timeout.duration 1s OZONE, SCM, HA, RATIS The minimum timeout duration for SCM ratis leader election. diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java index 316c867e9944..5d4b4774a5a1 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/Config.java @@ -55,5 +55,10 @@ */ TimeUnit timeUnit() default TimeUnit.MILLISECONDS; + /** + * If type == SIZE the unit should be defined with this attribute. + */ + StorageUnit sizeUnit() default StorageUnit.BYTES; + ConfigTag[] tags(); } diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java index 385840a08b95..39d481eadc81 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java @@ -42,5 +42,6 @@ public enum ConfigTag { STANDALONE, S3GATEWAY, DATANODE, - RECON + RECON, + HA } diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java index 8d3b4f218ca0..719352cedd25 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java @@ -90,6 +90,10 @@ public static void injectConfigurationToObject(ConfigurationSource from, forcedFieldSet(field, configuration, from.getTimeDuration(key, "0s", configAnnotation.timeUnit())); break; + case SIZE: + forcedFieldSet(field, configuration, + from.getStorageSize(key, "0B", configAnnotation.sizeUnit())); + break; default: throw new ConfigurationException( "Unsupported ConfigType " + type + " on " + fieldLocation); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 193e09f324c4..6342f88a7bf3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -219,7 +219,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl * * @param conf configuration */ - public StorageContainerManager(OzoneConfiguration conf) + private StorageContainerManager(OzoneConfiguration conf) throws IOException, AuthenticationException { // default empty configurator means default managers will be used. this(conf, new SCMConfigurator()); @@ -235,7 +235,7 @@ public StorageContainerManager(OzoneConfiguration conf) * @param conf - Configuration * @param configurator - configurator */ - public StorageContainerManager(OzoneConfiguration conf, + private StorageContainerManager(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException, AuthenticationException { super(HddsVersionInfo.HDDS_VERSION_INFO); @@ -268,14 +268,9 @@ public StorageContainerManager(OzoneConfiguration conf, loginAsSCMUser(conf); } - if (SCMHAUtils.isSCMHAEnabled(conf)) { - this.scmRatisSnapshotInfo = new SCMRatisSnapshotInfo( - scmStorageConfig.getCurrentDir()); - this.scmRatisSnapshotDir = SCMHAUtils.createSCMRatisDir(conf); - initializeRatisServer(); - } else { - scmRatisServer = null; - } + this.scmRatisSnapshotInfo = new SCMRatisSnapshotInfo( + scmStorageConfig.getCurrentDir()); + this.scmRatisSnapshotDir = SCMHAUtils.createSCMRatisDir(conf); // Creates the SCM DBs or opens them if it exists. // A valid pointer to the store is required by all the other services below. @@ -386,6 +381,38 @@ public StorageContainerManager(OzoneConfiguration conf, registerMetricsSource(this); } + /** + * Create an SCM instance based on the supplied configuration. + * + * @param conf HDDS configuration + * @param configurator SCM configurator + * @return SCM instance + * @throws IOException, AuthenticationException + */ + public static StorageContainerManager createSCM( + OzoneConfiguration conf, SCMConfigurator configurator) + throws IOException, AuthenticationException { + StorageContainerManager scm = new StorageContainerManager( + conf, configurator); + if (SCMHAUtils.isSCMHAEnabled(conf) && scm.getScmRatisServer() == null) { + SCMRatisServer scmRatisServer = initializeRatisServer(conf, scm); + scm.setScmRatisServer(scmRatisServer); + } + return scm; + } + + /** + * Create an SCM instance based on the supplied configuration. + * + * @param conf HDDS configuration + * @return SCM instance + * @throws IOException, AuthenticationException + */ + public static StorageContainerManager createSCM(OzoneConfiguration conf) + throws IOException, AuthenticationException { + return createSCM(conf, new SCMConfigurator()); + } + /** * This function initializes the following managers. If the configurator * specifies a value, we will use it, else we will use the default value. @@ -632,18 +659,6 @@ public static RPC.Server startRpcServer( return rpcServer; } - /** - * Create an SCM instance based on the supplied configuration. - * - * @param conf HDDS configuration - * @return SCM instance - * @throws IOException, AuthenticationException - */ - public static StorageContainerManager createSCM(OzoneConfiguration conf) - throws IOException, AuthenticationException { - return new StorageContainerManager(conf); - } - /** * Routine to set up the Version info for StorageContainerManager. * @@ -1136,18 +1151,20 @@ public NetworkTopology getClusterMap() { return this.clusterMap; } - private void initializeRatisServer() throws IOException { - if (scmRatisServer == null) { - SCMNodeDetails scmNodeDetails = SCMNodeDetails - .initStandAlone(configuration); - //TODO enable Ratis ring - scmRatisServer = SCMRatisServer.newSCMRatisServer(configuration, this, - scmNodeDetails, Collections.EMPTY_LIST); - if (scmRatisServer != null) { - LOG.info("SCM Ratis server initialized at port {}", - scmRatisServer.getServerPort()); - } - } + private static SCMRatisServer initializeRatisServer( + OzoneConfiguration conf, StorageContainerManager scm) throws IOException { + SCMNodeDetails scmNodeDetails = SCMNodeDetails + .initStandAlone(conf); + //TODO enable Ratis group + SCMRatisServer scmRatisServer = SCMRatisServer.newSCMRatisServer( + conf.getObject(SCMRatisServer.SCMRatisServerConfiguration.class), + scm, scmNodeDetails, Collections.EMPTY_LIST, + SCMRatisServer.getSCMRatisDirectory(conf)); + if (scmRatisServer != null) { + LOG.info("SCM Ratis server initialized at port {}", + scmRatisServer.getServerPort()); + } // TODO error handling for scmRatisServer creation failure + return scmRatisServer; } @VisibleForTesting @@ -1155,6 +1172,10 @@ public SCMRatisServer getScmRatisServer() { return scmRatisServer; } + public void setScmRatisServer(SCMRatisServer scmRatisServer) { + this.scmRatisServer = scmRatisServer; + } + @VisibleForTesting public SCMRatisSnapshotInfo getSnapshotInfo() { return scmRatisSnapshotInfo; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java index 89a9d557f2df..9ab8c6618d30 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMRatisServer.java @@ -20,8 +20,10 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; +import org.apache.hadoop.hdds.conf.Config; +import org.apache.hadoop.hdds.conf.ConfigGroup; +import org.apache.hadoop.hdds.conf.ConfigType; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -66,6 +68,11 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; +import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS; +import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; +import static org.apache.hadoop.hdds.conf.ConfigTag.HA; + /** * Class for SCM Ratis Server. */ @@ -76,6 +83,7 @@ public final class SCMRatisServer { private final StorageContainerManager scm; private final SCMStateMachine scmStateMachine; + private final String storageDir; private final int port; private final InetSocketAddress scmRatisAddress; private final RaftServer server; @@ -100,14 +108,15 @@ private static long nextCallId() { * Creates a SCM Ratis Server. * @throws IOException */ - private SCMRatisServer(ConfigurationSource conf, - StorageContainerManager scm, + private SCMRatisServer(SCMRatisServerConfiguration conf, + StorageContainerManager scm, String ratisStorageDir, String raftGroupIdStr, RaftPeerId localRaftPeerId, InetSocketAddress addr, List raftPeers) throws IOException { this.scm = scm; this.scmRatisAddress = addr; this.port = addr.getPort(); + this.storageDir = ratisStorageDir; RaftProperties serverProperties = newRaftProperties(conf); this.raftPeerId = localRaftPeerId; @@ -149,8 +158,9 @@ public void run() { * Create a SCM Ratis Server instance. */ public static SCMRatisServer newSCMRatisServer( - ConfigurationSource conf, StorageContainerManager scm, - SCMNodeDetails scmNodeDetails, List peers) + SCMRatisServerConfiguration conf, StorageContainerManager scm, + SCMNodeDetails scmNodeDetails, List peers, + String ratisStorageDir) throws IOException { String scmServiceId = scmNodeDetails.getSCMServiceId(); @@ -174,8 +184,8 @@ public static SCMRatisServer newSCMRatisServer( raftPeers.add(raftPeer); } - return new SCMRatisServer(conf, scm, scmServiceId, localRaftPeerId, - ratisAddr, raftPeers); + return new SCMRatisServer(conf, scm, ratisStorageDir, scmServiceId, + localRaftPeerId, ratisAddr, raftPeers); } private UUID getRaftGroupIdFromOmServiceId(String scmServiceId) { @@ -187,13 +197,10 @@ private SCMStateMachine getStateMachine() { return new SCMStateMachine(this); } - private RaftProperties newRaftProperties(ConfigurationSource conf) { + private RaftProperties newRaftProperties(SCMRatisServerConfiguration conf) { final RaftProperties properties = new RaftProperties(); // Set RPC type - final String rpcType = conf.get( - ScmConfigKeys.OZONE_SCM_RATIS_RPC_TYPE_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_RPC_TYPE_DEFAULT); - final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(rpcType); + final RpcType rpc = SupportedRpcType.valueOfIgnoreCase(conf.getRpcType()); RaftConfigKeys.Rpc.setType(properties, rpc); // Set the ratis port number if (rpc == SupportedRpcType.GRPC) { @@ -202,80 +209,41 @@ private RaftProperties newRaftProperties(ConfigurationSource conf) { NettyConfigKeys.Server.setPort(properties, port); } // Set Ratis storage directory - String storageDir = SCMRatisServer.getSCMRatisDirectory(conf); RaftServerConfigKeys.setStorageDir(properties, Collections.singletonList(new File(storageDir))); // Set RAFT segment size - final int raftSegmentSize = (int) conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_SIZE_DEFAULT, - org.apache.hadoop.hdds.conf.StorageUnit.BYTES); RaftServerConfigKeys.Log.setSegmentSizeMax(properties, - SizeInBytes.valueOf(raftSegmentSize)); + SizeInBytes.valueOf((long)conf.getSegmentSize())); // Set RAFT segment pre-allocated size - final int raftSegmentPreallocatedSize = (int) conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SEGMENT_PREALLOCATED_SIZE_DEFAULT, - org.apache.hadoop.hdds.conf.StorageUnit.BYTES); - int logAppenderQueueNumElements = conf.getInt( - ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS, - ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_NUM_ELEMENTS_DEFAULT); - final int logAppenderQueueByteLimit = (int) conf.getStorageSize( - ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT, - ScmConfigKeys.OZONE_SCM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT, - StorageUnit.BYTES); RaftServerConfigKeys.Log.Appender.setBufferElementLimit(properties, - logAppenderQueueNumElements); + (int)conf.getLogAppenderQueueByteLimit()); RaftServerConfigKeys.Log.Appender.setBufferByteLimit(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); + SizeInBytes.valueOf(conf.getLogAppenderQueueNum())); RaftServerConfigKeys.Log.setPreallocatedSize(properties, - SizeInBytes.valueOf(raftSegmentPreallocatedSize)); + SizeInBytes.valueOf((int)conf.getPreallocatedSize())); RaftServerConfigKeys.Log.Appender.setInstallSnapshotEnabled(properties, false); - final int logPurgeGap = conf.getInt( - ScmConfigKeys.OZONE_SCM_RATIS_LOG_PURGE_GAP, - ScmConfigKeys.OZONE_SCM_RATIS_LOG_PURGE_GAP_DEFAULT); - RaftServerConfigKeys.Log.setPurgeGap(properties, logPurgeGap); + RaftServerConfigKeys.Log.setPurgeGap(properties, conf.getLogPurgeGap()); // For grpc set the maximum message size // TODO: calculate the optimal max message size GrpcConfigKeys.setMessageSizeMax(properties, - SizeInBytes.valueOf(logAppenderQueueByteLimit)); + SizeInBytes.valueOf((int)conf.getLogAppenderQueueByteLimit())); // Set the server request timeout - TimeUnit serverRequestTimeoutUnit = - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT.getUnit(); - long serverRequestTimeoutDuration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT - .getDuration(), serverRequestTimeoutUnit); final TimeDuration serverRequestTimeout = TimeDuration.valueOf( - serverRequestTimeoutDuration, serverRequestTimeoutUnit); + conf.getRequestTimeout(), TimeUnit.MILLISECONDS); RaftServerConfigKeys.Rpc.setRequestTimeout(properties, serverRequestTimeout); // Set timeout for server retry cache entry - TimeUnit retryCacheTimeoutUnit = ScmConfigKeys - .OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT.getUnit(); - long retryCacheTimeoutDuration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DEFAULT - .getDuration(), retryCacheTimeoutUnit); final TimeDuration retryCacheTimeout = TimeDuration.valueOf( - retryCacheTimeoutDuration, retryCacheTimeoutUnit); + conf.getRetryCacheTimeout(), TimeUnit.MILLISECONDS); RaftServerConfigKeys.RetryCache.setExpiryTime(properties, retryCacheTimeout); // Set the server min and max timeout - TimeUnit serverMinTimeoutUnit = - ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT.getUnit(); - long serverMinTimeoutDuration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_MINIMUM_TIMEOUT_DEFAULT - .getDuration(), serverMinTimeoutUnit); final TimeDuration serverMinTimeout = TimeDuration.valueOf( - serverMinTimeoutDuration, serverMinTimeoutUnit); - long serverMaxTimeoutDuration = - serverMinTimeout.toLong(TimeUnit.MILLISECONDS) + 200; + conf.getMinTimeout(), TimeUnit.MILLISECONDS); final TimeDuration serverMaxTimeout = TimeDuration.valueOf( - serverMaxTimeoutDuration, serverMinTimeoutUnit); + conf.getMinTimeout() + 200L, TimeUnit.MILLISECONDS); RaftServerConfigKeys.Rpc.setTimeoutMin(properties, serverMinTimeout); RaftServerConfigKeys.Rpc.setTimeoutMax(properties, @@ -284,46 +252,24 @@ private RaftProperties newRaftProperties(ConfigurationSource conf) { RaftServerConfigKeys.Log.setSegmentCacheNumMax(properties, 2); // TODO: set max write buffer size // Set the ratis leader election timeout - TimeUnit leaderElectionMinTimeoutUnit = - ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getUnit(); - long leaderElectionMinTimeoutduration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_DEFAULT - .getDuration(), leaderElectionMinTimeoutUnit); final TimeDuration leaderElectionMinTimeout = TimeDuration.valueOf( - leaderElectionMinTimeoutduration, leaderElectionMinTimeoutUnit); + conf.getMinLeaderElectionTimeout(), TimeUnit.MILLISECONDS); RaftServerConfigKeys.Rpc.setTimeoutMin(properties, leaderElectionMinTimeout); long leaderElectionMaxTimeout = leaderElectionMinTimeout.toLong( TimeUnit.MILLISECONDS) + 200; RaftServerConfigKeys.Rpc.setTimeoutMax(properties, TimeDuration.valueOf(leaderElectionMaxTimeout, TimeUnit.MILLISECONDS)); - TimeUnit nodeFailureTimeoutUnit = - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getUnit(); - long nodeFailureTimeoutDuration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_FAILURE_TIMEOUT_DURATION_DEFAULT - .getDuration(), nodeFailureTimeoutUnit); + final TimeDuration nodeFailureTimeout = TimeDuration.valueOf( - nodeFailureTimeoutDuration, nodeFailureTimeoutUnit); + conf.getFailureTimeout(), TimeUnit.MILLISECONDS); RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties, nodeFailureTimeout); RaftServerConfigKeys.Rpc.setSlownessTimeout(properties, nodeFailureTimeout); // Ratis leader role check - TimeUnit roleCheckIntervalUnit = - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT - .getUnit(); - long roleCheckIntervalDuration = conf.getTimeDuration( - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_KEY, - ScmConfigKeys.OZONE_SCM_RATIS_SERVER_ROLE_CHECK_INTERVAL_DEFAULT - .getDuration(), nodeFailureTimeoutUnit); - this.roleCheckIntervalMs = TimeDuration.valueOf( - roleCheckIntervalDuration, roleCheckIntervalUnit) - .toLong(TimeUnit.MILLISECONDS); + this.roleCheckIntervalMs = conf.getRoleCheckerInterval(); this.roleCheckInitialDelayMs = leaderElectionMinTimeout .toLong(TimeUnit.MILLISECONDS); @@ -479,4 +425,166 @@ private void setServerRole(RaftPeerRole currentRole, this.roleCheckLock.writeLock().unlock(); } } + + /** + * Configuration used by SCM Ratis Server. + */ + @ConfigGroup(prefix = "ozone.scm.ratis") + public static class SCMRatisServerConfiguration { + @Config(key = "rpc.type", + type = ConfigType.STRING, + defaultValue = "GRPC", + tags = {SCM, OZONE, HA, RATIS}, + description = "Ratis supports different kinds of transports like" + + " netty, GRPC, Hadoop RPC etc. This picks one of those for" + + " this cluster." + ) + private String rpcType; + + @Config(key = "segment.size", + type = ConfigType.SIZE, + defaultValue = "16KB", + tags = {SCM, OZONE, HA, RATIS}, + description = "The size of the raft segment used by Apache Ratis on" + + " SCM. (16 KB by default)" + ) + private double segmentSize = 16 * 1024; + + @Config(key = "segment.preallocated.size", + type = ConfigType.SIZE, + defaultValue = "16KB", + tags = {SCM, OZONE, HA, RATIS}, + description = "The size of the buffer which is preallocated for" + + " raft segment used by Apache Ratis on SCM.(16 KB by default)" + ) + private double preallocatedSize = 16 * 1024; + + @Config(key = "log.appender.queue.num-elements", + type = ConfigType.INT, + defaultValue = "1024", + tags = {SCM, OZONE, HA, RATIS}, + description = "Number of operation pending with Raft's Log Worker." + ) + private int logAppenderQueueNum = 1024; + + @Config(key = "log.appender.queue.byte-limit", + type = ConfigType.SIZE, + defaultValue = "32MB", + tags = {SCM, OZONE, HA, RATIS}, + description = "Byte limit for Raft's Log Worker queue." + ) + private double logAppenderQueueByteLimit = 32 * 1024 * 1024; + + @Config(key = "log.purge.gap", + type = ConfigType.INT, + defaultValue = "1000000", + tags = {SCM, OZONE, HA, RATIS}, + description = "The minimum gap between log indices for Raft server to" + + " purge its log segments after taking snapshot." + ) + private int logPurgeGap = 1000000; + + @Config(key = "server.request.timeout", + type = ConfigType.TIME, + defaultValue = "3s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The timeout duration for SCM's ratis server request." + ) + private long requestTimeout = 3 * 1000L; + + @Config(key = "server.retry.cache.timeout", + type = ConfigType.TIME, + defaultValue = "60s", + tags = {SCM, OZONE, HA, RATIS}, + description = "Retry Cache entry timeout for SCM's ratis server." + ) + private long retryCacheTimeout = 60 * 1000L; + + @Config(key = "minimum.timeout", + type = ConfigType.TIME, + defaultValue = "1s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The minimum timeout duration for SCM's Ratis server rpc." + ) + private long minTimeout = 1 * 1000L; + + @Config(key = "leader.election.minimum.timeout.duration", + type = ConfigType.TIME, + defaultValue = "1s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The minimum timeout duration for SCM ratis leader" + + " election. Default is 1s." + ) + private long minLeaderElectionTimeout = 1 * 1000L; + + @Config(key = "server.failure.timeout.duration", + type = ConfigType.TIME, + defaultValue = "120s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The timeout duration for ratis server failure" + + " detection, once the threshold has reached, the ratis state" + + " machine will be informed about the failure in the ratis ring." + ) + private long failureTimeout = 120 * 1000L; + + @Config(key = "server.role.check.interval", + type = ConfigType.TIME, + defaultValue = "15s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The interval between SCM leader performing a role" + + " check on its ratis server. Ratis server informs SCM if it loses" + + " the leader role. The scheduled check is an secondary check to" + + " ensure that the leader role is updated periodically" + ) + private long roleCheckerInterval = 15 * 1000L; + + public String getRpcType() { + return rpcType; + } + + public double getSegmentSize() { + return segmentSize; + } + + public double getPreallocatedSize() { + return preallocatedSize; + } + + public int getLogAppenderQueueNum() { + return logAppenderQueueNum; + } + + public double getLogAppenderQueueByteLimit() { + return logAppenderQueueByteLimit; + } + + public int getLogPurgeGap() { + return logPurgeGap; + } + + public long getRequestTimeout() { + return requestTimeout; + } + + public long getRetryCacheTimeout() { + return retryCacheTimeout; + } + + public long getMinTimeout() { + return minTimeout; + } + + public long getMinLeaderElectionTimeout() { + return minLeaderElectionTimeout; + } + + public long getFailureTimeout() { + return failureTimeout; + } + + + public long getRoleCheckerInterval() { + return roleCheckerInterval; + } + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 64752dab5543..5d1ed4694680 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -497,7 +497,7 @@ public static StorageContainerManager getScm(OzoneConfiguration conf, // writes the version file properties scmStore.initialize(); } - return new StorageContainerManager(conf, configurator); + return StorageContainerManager.createSCM(conf, configurator); } public static ContainerInfo getContainer( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java index d6981d3d8cc5..40799655b44e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java @@ -117,8 +117,10 @@ public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws // Starts a single node Ratis server scmRatisServer.stop(); SCMRatisServer newScmRatisServer = SCMRatisServer - .newSCMRatisServer(newConf, scm, nodeDetails, - Collections.emptyList()); + .newSCMRatisServer(newConf.getObject(SCMRatisServer + .SCMRatisServerConfiguration.class), scm, nodeDetails, + Collections.emptyList(), + SCMRatisServer.getSCMRatisDirectory(newConf)); newScmRatisServer.start(); UUID uuid = UUID.nameUUIDFromBytes(customScmServiceId.getBytes()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java index 797c8051bc78..b1a79d94ade1 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genesis/GenesisUtil.java @@ -133,7 +133,7 @@ static StorageContainerManager getScm(OzoneConfiguration conf, // writes the version file properties scmStore.initialize(); } - return new StorageContainerManager(conf, configurator); + return StorageContainerManager.createSCM(conf, configurator); } static void configureSCM(OzoneConfiguration conf, int numHandlers) { From 1f3ef369755a4d2363506cef545c88c144f14fc7 Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Tue, 26 May 2020 13:26:16 +0530 Subject: [PATCH 05/51] HDDS-3186. Introduce generic SCMRatisRequest and SCMRatisResponse. (#959) * HDDS-3186. Initial version. * HDDS-3186. Additional changes. --- .../dev-support/findbugsExcludeFile.xml | 21 + hadoop-hdds/server-scm/pom.xml | 38 ++ .../scm/container/ContainerManagerImpl.java | 282 +++++++++++++ .../scm/container/ContainerManagerV2.java | 189 +++++++++ .../container/ContainerStateManagerImpl.java | 397 ++++++++++++++++++ .../container/ContainerStateManagerV2.java | 75 ++++ .../apache/hadoop/hdds/scm/ha/RatisUtil.java | 155 +++++++ .../hadoop/hdds/scm/ha/ReflectionUtil.java | 67 +++ .../hdds/scm/ha/SCMHAConfiguration.java | 225 ++++++++++ .../hdds/scm/ha/SCMHAInvocationHandler.java | 93 ++++ .../hadoop/hdds/scm/ha/SCMHAManager.java | 76 ++++ .../hadoop/hdds/scm/ha/SCMRatisRequest.java | 147 +++++++ .../hadoop/hdds/scm/ha/SCMRatisResponse.java | 127 ++++++ .../hadoop/hdds/scm/ha/SCMRatisServer.java | 109 +++++ .../hadoop/hdds/scm/ha/SCMStateMachine.java | 92 ++++ .../hadoop/hdds/scm/metadata/Replicate.java | 33 ++ .../src/main/proto/SCMRatisProtocol.proto | 46 ++ 17 files changed, 2172 insertions(+) create mode 100644 hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java create mode 100644 hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto diff --git a/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml new file mode 100644 index 000000000000..3571a8929e3f --- /dev/null +++ b/hadoop-hdds/server-scm/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,21 @@ + + + + + + diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index dcbc42a17d52..8c17aaef4566 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -128,6 +128,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-hdds-hadoop-dependency-test test + + com.google.protobuf + protobuf-java + compile + @@ -163,6 +168,39 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + org.apache.hadoop + hadoop-maven-plugins + + + compile-protoc + + protoc + + + ${protobuf.version} + + + ${basedir}/src/main/proto + + + + ${basedir}/src/main/proto + + SCMRatisProtocol.proto + + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java new file mode 100644 index 000000000000..0404530b2f1f --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -0,0 +1,282 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * TODO: Add javadoc. + */ +public class ContainerManagerImpl implements ContainerManagerV2 { + + /** + * + */ + private static final Logger LOG = LoggerFactory.getLogger( + ContainerManagerImpl.class); + + /** + * + */ + private final ReadWriteLock lock; + + /** + * + */ + private final PipelineManager pipelineManager; + + /** + * + */ + private final ContainerStateManagerV2 containerStateManager; + + /** + * + */ + public ContainerManagerImpl( + // Introduce builder for this class? + final Configuration conf, final PipelineManager pipelineManager, + final SCMHAManager scmhaManager, + final Table containerStore) + throws IOException { + this.lock = new ReentrantReadWriteLock(); + this.pipelineManager = pipelineManager; + this.containerStateManager = ContainerStateManagerImpl.newBuilder() + .setConfiguration(conf) + .setPipelineManager(pipelineManager) + .setRatisServer(scmhaManager.getRatisServer()) + .setContainerStore(containerStore) + .build(); + } + + @Override + public Set getContainerIDs() { + lock.readLock().lock(); + try { + return containerStateManager.getContainerIDs(); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public Set getContainers() { + lock.readLock().lock(); + try { + return containerStateManager.getContainerIDs().stream().map(id -> { + try { + return containerStateManager.getContainer(id); + } catch (ContainerNotFoundException e) { + // How can this happen? o_O + return null; + } + }).filter(Objects::nonNull).collect(Collectors.toSet()); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public ContainerInfo getContainer(final ContainerID containerID) + throws ContainerNotFoundException { + lock.readLock().lock(); + try { + return containerStateManager.getContainer(containerID); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public Set getContainers(final LifeCycleState state) { + lock.readLock().lock(); + try { + return containerStateManager.getContainerIDs(state).stream().map(id -> { + try { + return containerStateManager.getContainer(id); + } catch (ContainerNotFoundException e) { + // How can this happen? o_O + return null; + } + }).filter(Objects::nonNull).collect(Collectors.toSet()); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public boolean exists(final ContainerID containerID) { + lock.readLock().lock(); + try { + return (containerStateManager.getContainer(containerID) != null); + } catch (ContainerNotFoundException ex) { + return false; + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List listContainers(final ContainerID startID, + final int count) { + lock.readLock().lock(); + try { + final long startId = startID == null ? 0 : startID.getId(); + final List containersIds = + new ArrayList<>(containerStateManager.getContainerIDs()); + Collections.sort(containersIds); + return containersIds.stream() + .filter(id -> id.getId() > startId) + .limit(count) + .map(id -> { + try { + return containerStateManager.getContainer(id); + } catch (ContainerNotFoundException ex) { + // This can never happen, as we hold lock no one else can remove + // the container after we got the container ids. + LOG.warn("Container Missing.", ex); + return null; + } + }).collect(Collectors.toList()); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public ContainerInfo allocateContainer(final ReplicationType type, + final ReplicationFactor replicationFactor, final String owner) + throws IOException { + lock.writeLock().lock(); + try { + final List pipelines = pipelineManager + .getPipelines(type, replicationFactor, Pipeline.PipelineState.OPEN); + + if (pipelines.isEmpty()) { + throw new IOException("Could not allocate container. Cannot get any" + + " matching pipeline for Type:" + type + ", Factor:" + + replicationFactor + ", State:PipelineState.OPEN"); + } + + final ContainerID containerID = containerStateManager + .getNextContainerID(); + final Pipeline pipeline = pipelines.get( + (int) containerID.getId() % pipelines.size()); + + final ContainerInfoProto containerInfo = ContainerInfoProto.newBuilder() + .setState(LifeCycleState.OPEN) + .setPipelineID(pipeline.getId().getProtobuf()) + .setUsedBytes(0) + .setNumberOfKeys(0) + .setStateEnterTime(Time.now()) + .setOwner(owner) + .setContainerID(containerID.getId()) + .setDeleteTransactionId(0) + .setReplicationFactor(pipeline.getFactor()) + .setReplicationType(pipeline.getType()) + .build(); + containerStateManager.addContainer(containerInfo); + if (LOG.isTraceEnabled()) { + LOG.trace("New container allocated: {}", containerInfo); + } + return containerStateManager.getContainer(containerID); + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public void deleteContainer(final ContainerID containerID) + throws ContainerNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void updateContainerState(final ContainerID containerID, + final LifeCycleEvent event) + throws ContainerNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public Set getContainerReplicas( + final ContainerID containerID) throws ContainerNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void updateContainerReplica(final ContainerID containerID, + final ContainerReplica replica) + throws ContainerNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void removeContainerReplica(final ContainerID containerID, + final ContainerReplica replica) + throws ContainerNotFoundException, ContainerReplicaNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void updateDeleteTransactionId( + final Map deleteTransactionMap) throws IOException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public ContainerInfo getMatchingContainer(final long size, final String owner, + final Pipeline pipeline, final List excludedContainerIDS) { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void notifyContainerReportProcessing(final boolean isFullReport, + final boolean success) { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + @Override + public void close() throws IOException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java new file mode 100644 index 000000000000..37c7b709d458 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java @@ -0,0 +1,189 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.hdds.scm.container; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; + +/** + * TODO: Add extensive javadoc. + * + * ContainerManager class contains the mapping from a name to a pipeline + * mapping. This is used by SCM when allocating new locations and when + * looking up a key. + */ +public interface ContainerManagerV2 extends Closeable { + + + /** + * Returns all the container Ids managed by ContainerManager. + * + * @return Set of ContainerID + */ + Set getContainerIDs(); + + /** + * Returns all the containers managed by ContainerManager. + * + * @return List of ContainerInfo + */ + Set getContainers(); + + /** + * Returns all the containers which are in the specified state. + * + * @return List of ContainerInfo + */ + Set getContainers(LifeCycleState state); + + /** + * Returns the ContainerInfo from the container ID. + * + */ + ContainerInfo getContainer(ContainerID containerID) + throws ContainerNotFoundException; + + boolean exists(ContainerID containerID); + + /** + * Returns containers under certain conditions. + * Search container IDs from start ID(exclusive), + * The max size of the searching range cannot exceed the + * value of count. + * + * @param startID start containerID, >=0, + * start searching at the head if 0. + * @param count count must be >= 0 + * Usually the count will be replace with a very big + * value instead of being unlimited in case the db is very big. + * + * @return a list of container. + */ + List listContainers(ContainerID startID, int count); + + /** + * Allocates a new container for a given keyName and replication factor. + * + * @param replicationFactor - replication factor of the container. + * @param owner + * @return - ContainerInfo. + * @throws IOException + */ + ContainerInfo allocateContainer(ReplicationType type, + ReplicationFactor replicationFactor, + String owner) throws IOException; + + /** + * Deletes a container from SCM. + * + * @param containerID - Container ID + * @throws IOException + */ + void deleteContainer(ContainerID containerID) + throws ContainerNotFoundException; + + /** + * Update container state. + * @param containerID - Container ID + * @param event - container life cycle event + * @throws IOException + */ + void updateContainerState(ContainerID containerID, + LifeCycleEvent event) + throws ContainerNotFoundException; + + /** + * Returns the latest list of replicas for given containerId. + * + * @param containerID Container ID + * @return Set of ContainerReplica + */ + Set getContainerReplicas(ContainerID containerID) + throws ContainerNotFoundException; + + /** + * Adds a container Replica for the given Container. + * + * @param containerID Container ID + * @param replica ContainerReplica + */ + void updateContainerReplica(ContainerID containerID, ContainerReplica replica) + throws ContainerNotFoundException; + + /** + * Remove a container Replica form a given Container. + * + * @param containerID Container ID + * @param replica ContainerReplica + * @return True of dataNode is removed successfully else false. + */ + void removeContainerReplica(ContainerID containerID, ContainerReplica replica) + throws ContainerNotFoundException, ContainerReplicaNotFoundException; + + /** + * Update deleteTransactionId according to deleteTransactionMap. + * + * @param deleteTransactionMap Maps the containerId to latest delete + * transaction id for the container. + * @throws IOException + */ + void updateDeleteTransactionId(Map deleteTransactionMap) + throws IOException; + + /** + * Returns ContainerInfo which matches the requirements. + * @param size - the amount of space required in the container + * @param owner - the user which requires space in its owned container + * @param pipeline - pipeline to which the container should belong + * @return ContainerInfo for the matching container. + */ + default ContainerInfo getMatchingContainer(long size, String owner, + Pipeline pipeline) { + return getMatchingContainer(size, owner, pipeline, Collections.emptyList()); + } + + /** + * Returns ContainerInfo which matches the requirements. + * @param size - the amount of space required in the container + * @param owner - the user which requires space in its owned container + * @param pipeline - pipeline to which the container should belong. + * @param excludedContainerIDS - containerIds to be excluded. + * @return ContainerInfo for the matching container. + */ + ContainerInfo getMatchingContainer(long size, String owner, + Pipeline pipeline, + List excludedContainerIDS); + + /** + * Once after report processor handler completes, call this to notify + * container manager to increment metrics. + * @param isFullReport + * @param success + */ + // Is it possible to remove this from the Interface? + void notifyContainerReportProcessing(boolean isFullReport, boolean success); +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java new file mode 100644 index 000000000000..16fe3407bde4 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -0,0 +1,397 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import java.io.IOException; + +import java.lang.reflect.Proxy; +import java.util.HashSet; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; +import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.Table.KeyValue; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import com.google.common.base.Preconditions; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.states.ContainerState; +import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.ozone.common.statemachine.StateMachine; + +/** + * TODO: Add javadoc. + */ +public final class ContainerStateManagerImpl + implements ContainerStateManagerV2 { + + /* ********************************************************************** + * Container Life Cycle * + * * + * Event and State Transition Mapping: * + * * + * State: OPEN ----------------> CLOSING * + * Event: FINALIZE * + * * + * State: CLOSING ----------------> QUASI_CLOSED * + * Event: QUASI_CLOSE * + * * + * State: CLOSING ----------------> CLOSED * + * Event: CLOSE * + * * + * State: QUASI_CLOSED ----------------> CLOSED * + * Event: FORCE_CLOSE * + * * + * State: CLOSED ----------------> DELETING * + * Event: DELETE * + * * + * State: DELETING ----------------> DELETED * + * Event: CLEANUP * + * * + * * + * Container State Flow: * + * * + * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED] * + * (FINALIZE) | (QUASI_CLOSE) | * + * | | * + * | | * + * (CLOSE) | (FORCE_CLOSE) | * + * | | * + * | | * + * +--------->[CLOSED]<--------+ * + * | * + * (DELETE)| * + * | * + * | * + * [DELETING] * + * | * + * (CLEANUP) | * + * | * + * V * + * [DELETED] * + * * + ************************************************************************/ + + /** + * + */ + private static final Logger LOG = LoggerFactory.getLogger( + ContainerStateManagerImpl.class); + + /** + * + */ + private final long containerSize; + + /** + * + */ + private final AtomicLong nextContainerID; + + /** + * + */ + private final ContainerStateMap containers; + + /** + * + */ + private final PipelineManager pipelineManager; + + /** + * + */ + private Table containerStore; + + /** + * + */ + private final StateMachine stateMachine; + + /** + * + */ + private final ConcurrentHashMap lastUsedMap; + + /** + * + */ + private ContainerStateManagerImpl(final Configuration conf, + final PipelineManager pipelineManager, + final Table containerStore) + throws IOException { + this.pipelineManager = pipelineManager; + this.containerStore = containerStore; + this.stateMachine = newStateMachine(); + this.containerSize = getConfiguredContainerSize(conf); + this.nextContainerID = new AtomicLong(); + this.containers = new ContainerStateMap(); + this.lastUsedMap = new ConcurrentHashMap<>(); + + initialize(); + } + + /** + * + */ + private StateMachine newStateMachine() { + + final Set finalStates = new HashSet<>(); + + // These are the steady states of a container. + finalStates.add(LifeCycleState.OPEN); + finalStates.add(LifeCycleState.CLOSED); + finalStates.add(LifeCycleState.DELETED); + + final StateMachine containerLifecycleSM = + new StateMachine<>(LifeCycleState.OPEN, finalStates); + + containerLifecycleSM.addTransition(LifeCycleState.OPEN, + LifeCycleState.CLOSING, + LifeCycleEvent.FINALIZE); + + containerLifecycleSM.addTransition(LifeCycleState.CLOSING, + LifeCycleState.QUASI_CLOSED, + LifeCycleEvent.QUASI_CLOSE); + + containerLifecycleSM.addTransition(LifeCycleState.CLOSING, + LifeCycleState.CLOSED, + LifeCycleEvent.CLOSE); + + containerLifecycleSM.addTransition(LifeCycleState.QUASI_CLOSED, + LifeCycleState.CLOSED, + LifeCycleEvent.FORCE_CLOSE); + + containerLifecycleSM.addTransition(LifeCycleState.CLOSED, + LifeCycleState.DELETING, + LifeCycleEvent.DELETE); + + containerLifecycleSM.addTransition(LifeCycleState.DELETING, + LifeCycleState.DELETED, + LifeCycleEvent.CLEANUP); + + return containerLifecycleSM; + } + + /** + * + */ + private long getConfiguredContainerSize(final Configuration conf) { + return (long) conf.getStorageSize( + ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, + ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, + StorageUnit.BYTES); + } + + /** + * + */ + private void initialize() throws IOException { + TableIterator> + iterator = containerStore.iterator(); + + while (iterator.hasNext()) { + final ContainerInfo container = iterator.next().getValue(); + Preconditions.checkNotNull(container); + containers.addContainer(container); + nextContainerID.set(Long.max(container.containerID().getId(), + nextContainerID.get())); + if (container.getState() == LifeCycleState.OPEN) { + try { + pipelineManager.addContainerToPipeline(container.getPipelineID(), + ContainerID.valueof(container.getContainerID())); + } catch (PipelineNotFoundException ex) { + LOG.warn("Found container {} which is in OPEN state with " + + "pipeline {} that does not exist. Marking container for " + + "closing.", container, container.getPipelineID()); + updateContainerState(container.containerID(), + LifeCycleEvent.FINALIZE); + } + } + } + } + + @Override + public ContainerID getNextContainerID() { + return ContainerID.valueof(nextContainerID.get()); + } + + @Override + public Set getContainerIDs() { + return containers.getAllContainerIDs(); + } + + @Override + public Set getContainerIDs(final LifeCycleState state) { + return containers.getContainerIDsByState(state); + } + + @Override + public ContainerInfo getContainer(final ContainerID containerID) + throws ContainerNotFoundException { + return containers.getContainerInfo(containerID); + } + + @Override + public Set getContainerReplicas( + final ContainerID containerID) throws ContainerNotFoundException { + return containers.getContainerReplicas(containerID); + } + + @Override + public void addContainer(final ContainerInfoProto containerInfo) + throws IOException { + + // Change the exception thrown to PipelineNotFound and + // ClosedPipelineException once ClosedPipelineException is introduced + // in PipelineManager. + + Preconditions.checkNotNull(containerInfo); + final ContainerInfo container = ContainerInfo.fromProtobuf(containerInfo); + if (getContainer(container.containerID()) == null) { + Preconditions.checkArgument(nextContainerID.get() + == container.containerID().getId(), + "ContainerID mismatch."); + + pipelineManager.addContainerToPipeline( + container.getPipelineID(), container.containerID()); + containers.addContainer(container); + nextContainerID.incrementAndGet(); + } + } + + void updateContainerState(final ContainerID containerID, + final LifeCycleEvent event) + throws IOException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + + void updateContainerReplica(final ContainerID containerID, + final ContainerReplica replica) + throws ContainerNotFoundException { + containers.updateContainerReplica(containerID, replica); + } + + + void updateDeleteTransactionId( + final Map deleteTransactionMap) { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + ContainerInfo getMatchingContainer(final long size, String owner, + PipelineID pipelineID, NavigableSet containerIDs) { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + + NavigableSet getMatchingContainerIDs(final String owner, + final ReplicationType type, final ReplicationFactor factor, + final LifeCycleState state) { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + void removeContainerReplica(final ContainerID containerID, + final ContainerReplica replica) + throws ContainerNotFoundException, ContainerReplicaNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + + void removeContainer(final ContainerID containerID) + throws ContainerNotFoundException { + throw new UnsupportedOperationException("Not yet implemented!"); + } + + void close() throws IOException { + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for ContainerStateManager. + */ + public static class Builder { + private Configuration conf; + private PipelineManager pipelineMgr; + private SCMRatisServer scmRatisServer; + private Table table; + + public Builder setConfiguration(final Configuration config) { + conf = config; + return this; + } + + public Builder setPipelineManager(final PipelineManager pipelineManager) { + pipelineMgr = pipelineManager; + return this; + } + + public Builder setRatisServer(final SCMRatisServer ratisServer) { + scmRatisServer = ratisServer; + return this; + } + + public Builder setContainerStore( + final Table containerStore) { + table = containerStore; + return this; + } + + public ContainerStateManagerV2 build() throws IOException { + Preconditions.checkNotNull(conf); + Preconditions.checkNotNull(pipelineMgr); + Preconditions.checkNotNull(scmRatisServer); + Preconditions.checkNotNull(table); + + final ContainerStateManagerV2 csm = new ContainerStateManagerImpl( + conf, pipelineMgr, table); + scmRatisServer.registerStateMachineHandler(RequestType.CONTAINER, csm); + + final SCMHAInvocationHandler invocationHandler = + new SCMHAInvocationHandler(RequestType.CONTAINER, csm, + scmRatisServer); + + return (ContainerStateManagerV2) Proxy.newProxyInstance( + SCMHAInvocationHandler.class.getClassLoader(), + new Class[]{ContainerStateManagerV2.class}, invocationHandler); + } + + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java new file mode 100644 index 000000000000..9960354be402 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; +import org.apache.hadoop.hdds.scm.metadata.Replicate; + +import java.io.IOException; +import java.util.Set; + +/** + * + * TODO: Add proper javadoc. + * + * Implementation of methods marked with {@code @Replicate} annotation should be + * + * 1. Idempotent + * 2. Arguments should be of protobuf objects + * 3. Return type should be of protobuf object + * 4. The declaration should throw RaftException + * + */ +public interface ContainerStateManagerV2 { + + /** + * + */ + ContainerID getNextContainerID(); + + /** + * + */ + Set getContainerIDs(); + + /** + * + */ + Set getContainerIDs(LifeCycleState state); + + /** + * + */ + ContainerInfo getContainer(ContainerID containerID) + throws ContainerNotFoundException; + + /** + * + */ + Set getContainerReplicas(ContainerID containerID) + throws ContainerNotFoundException; + + /** + * + */ + @Replicate + void addContainer(ContainerInfoProto containerInfo) + throws IOException; + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java new file mode 100644 index 000000000000..1bc16974362f --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/RatisUtil.java @@ -0,0 +1,155 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import com.google.common.base.Strings; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.server.ServerUtils; +import org.apache.ratis.RaftConfigKeys; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.grpc.GrpcConfigKeys; +import org.apache.ratis.rpc.RpcType; +import org.apache.ratis.server.RaftServerConfigKeys; +import org.apache.ratis.util.SizeInBytes; +import org.apache.ratis.util.TimeDuration; + +import java.io.File; +import java.util.Collections; +import java.util.concurrent.TimeUnit; + +import static org.apache.ratis.server.RaftServerConfigKeys.Log; +import static org.apache.ratis.server.RaftServerConfigKeys.RetryCache; +import static org.apache.ratis.server.RaftServerConfigKeys.Rpc; +import static org.apache.ratis.server.RaftServerConfigKeys.Snapshot; + +/** + * Ratis Util for SCM HA. + */ +public final class RatisUtil { + + private RatisUtil() { + } + + + /** + * Constructs new Raft Properties instance using {@link SCMHAConfiguration}. + * @param haConf SCMHAConfiguration + * @param conf ConfigurationSource + */ + public static RaftProperties newRaftProperties( + final SCMHAConfiguration haConf, final ConfigurationSource conf) { + //TODO: Remove ConfigurationSource! + // TODO: Check the default values. + final RaftProperties properties = new RaftProperties(); + setRaftStorageDir(properties, haConf, conf); + setRaftRpcProperties(properties, haConf); + setRaftLogProperties(properties, haConf); + setRaftRetryCacheProperties(properties, haConf); + setRaftSnapshotProperties(properties, haConf); + return properties; + } + + /** + * Set the local directory where ratis logs will be stored. + * + * @param properties RaftProperties instance which will be updated + * @param haConf SCMHAConfiguration + * @param conf ConfigurationSource + */ + public static void setRaftStorageDir(final RaftProperties properties, + final SCMHAConfiguration haConf, + final ConfigurationSource conf) { + String storageDir = haConf.getRatisStorageDir(); + if (Strings.isNullOrEmpty(storageDir)) { + storageDir = ServerUtils.getDefaultRatisDirectory(conf); + } + RaftServerConfigKeys.setStorageDir(properties, + Collections.singletonList(new File(storageDir))); + } + + /** + * Set properties related to Raft RPC. + * + * @param properties RaftProperties instance which will be updated + * @param conf SCMHAConfiguration + */ + private static void setRaftRpcProperties(final RaftProperties properties, + final SCMHAConfiguration conf) { + RaftConfigKeys.Rpc.setType(properties, + RpcType.valueOf(conf.getRatisRpcType())); + GrpcConfigKeys.Server.setPort(properties, + conf.getRatisBindAddress().getPort()); + GrpcConfigKeys.setMessageSizeMax(properties, + SizeInBytes.valueOf("32m")); + + Rpc.setRequestTimeout(properties, TimeDuration.valueOf( + conf.getRatisRequestTimeout(), TimeUnit.MILLISECONDS)); + Rpc.setTimeoutMin(properties, TimeDuration.valueOf( + conf.getRatisRequestMinTimeout(), TimeUnit.MILLISECONDS)); + Rpc.setTimeoutMax(properties, TimeDuration.valueOf( + conf.getRatisRequestMaxTimeout(), TimeUnit.MILLISECONDS)); + Rpc.setSlownessTimeout(properties, TimeDuration.valueOf( + conf.getRatisNodeFailureTimeout(), TimeUnit.MILLISECONDS)); + } + + /** + * Set properties related to Raft Log. + * + * @param properties RaftProperties instance which will be updated + * @param conf SCMHAConfiguration + */ + private static void setRaftLogProperties(final RaftProperties properties, + final SCMHAConfiguration conf) { + Log.setSegmentSizeMax(properties, + SizeInBytes.valueOf(conf.getRaftSegmentSize())); + Log.Appender.setBufferElementLimit(properties, + conf.getRaftLogAppenderQueueByteLimit()); + Log.Appender.setBufferByteLimit(properties, + SizeInBytes.valueOf(conf.getRaftLogAppenderQueueByteLimit())); + Log.setPreallocatedSize(properties, + SizeInBytes.valueOf(conf.getRaftSegmentPreAllocatedSize())); + Log.Appender.setInstallSnapshotEnabled(properties, false); + Log.setPurgeGap(properties, conf.getRaftLogPurgeGap()); + Log.setSegmentCacheNumMax(properties, 2); + } + + /** + * Set properties related to Raft Retry Cache. + * + * @param properties RaftProperties instance which will be updated + * @param conf SCMHAConfiguration + */ + private static void setRaftRetryCacheProperties( + final RaftProperties properties, final SCMHAConfiguration conf) { + RetryCache.setExpiryTime(properties, TimeDuration.valueOf( + conf.getRatisRetryCacheTimeout(), TimeUnit.MILLISECONDS)); + } + + /** + * Set properties related to Raft Snapshot. + * + * @param properties RaftProperties instance which will be updated + * @param conf SCMHAConfiguration + */ + private static void setRaftSnapshotProperties( + final RaftProperties properties, final SCMHAConfiguration conf) { + Snapshot.setAutoTriggerEnabled(properties, true); + Snapshot.setAutoTriggerThreshold(properties, 400000); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java new file mode 100644 index 000000000000..7c54723d7470 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/ReflectionUtil.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.lang.reflect.Method; +import java.util.HashMap; +import java.util.Map; + +/** + * Reflection util for SCM HA. + */ +public final class ReflectionUtil { + + private static Map> classCache = new HashMap<>(); + + private ReflectionUtil() { + } + + /** + * Returns the {@code Class} object associated with the given string name. + * + * @param className the fully qualified name of the desired class. + * @return the {@code Class} object for the class with the + * specified name. + * @throws ClassNotFoundException if the class cannot be located + */ + public static Class getClass(String className) + throws ClassNotFoundException { + if (!classCache.containsKey(className)) { + classCache.put(className, Class.forName(className)); + } + return classCache.get(className); + } + + /** + * Returns a {@code Method} object that reflects the specified public + * member method of the given {@code Class} object. + * + * @param clazz the class object which has the method + * @param methodName the name of the method + * @param arg the list of parameters + * @return the {@code Method} object that matches the specified + * {@code name} and {@code parameterTypes} + * @throws NoSuchMethodException if a matching method is not found + * or if the name is "<init>"or "<clinit>". + */ + public static Method getMethod( + final Class clazz, final String methodName, final Class... arg) + throws NoSuchMethodException { + return clazz.getMethod(methodName, arg); + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java new file mode 100644 index 000000000000..1cb8c65675f8 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java @@ -0,0 +1,225 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.net.InetSocketAddress; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.conf.Config; +import org.apache.hadoop.hdds.conf.ConfigGroup; +import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.net.NetUtils; + +import static org.apache.hadoop.hdds.conf.ConfigTag.HA; +import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; +import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS; +import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; + +/** + * Configuration used by SCM HA. + */ +@ConfigGroup(prefix = "ozone.scm.ha") +public class SCMHAConfiguration { + + @Config(key = "ratis.storage.dir", + type = ConfigType.STRING, + defaultValue = "", + tags = {OZONE, SCM, HA, RATIS}, + description = "Storage directory used by SCM to write Ratis logs." + ) + private String ratisStorageDir; + + @Config(key = "ratis.bind.host", + type = ConfigType.STRING, + defaultValue = "0.0.0.0", + tags = {OZONE, SCM, HA, RATIS}, + description = "Host used by SCM for binding Ratis Server." + ) + private String ratisBindHost = "0.0.0.0"; + + @Config(key = "ratis.bind.port", + type = ConfigType.STRING, + defaultValue = "9865", + tags = {OZONE, SCM, HA, RATIS}, + description = "Port used by SCM for Ratis Server." + ) + private int ratisBindPort = 9865; + + + @Config(key = "ratis.rpc.type", + type = ConfigType.STRING, + defaultValue = "GRPC", + tags = {SCM, OZONE, HA, RATIS}, + description = "Ratis supports different kinds of transports like" + + " netty, GRPC, Hadoop RPC etc. This picks one of those for" + + " this cluster." + ) + private String ratisRpcType; + + @Config(key = "ratis.segment.size", + type = ConfigType.SIZE, + defaultValue = "16KB", + tags = {SCM, OZONE, HA, RATIS}, + description = "The size of the raft segment used by Apache Ratis on" + + " SCM. (16 KB by default)" + ) + private long raftSegmentSize = 16L * 1024L; + + @Config(key = "ratis.segment.preallocated.size", + type = ConfigType.SIZE, + defaultValue = "16KB", + tags = {SCM, OZONE, HA, RATIS}, + description = "The size of the buffer which is preallocated for" + + " raft segment used by Apache Ratis on SCM.(16 KB by default)" + ) + private long raftSegmentPreAllocatedSize = 16 * 1024; + + @Config(key = "ratis.log.appender.queue.num-elements", + type = ConfigType.INT, + defaultValue = "1024", + tags = {SCM, OZONE, HA, RATIS}, + description = "Number of operation pending with Raft's Log Worker." + ) + private int raftLogAppenderQueueNum = 1024; + + @Config(key = "ratis.log.appender.queue.byte-limit", + type = ConfigType.SIZE, + defaultValue = "32MB", + tags = {SCM, OZONE, HA, RATIS}, + description = "Byte limit for Raft's Log Worker queue." + ) + private int raftLogAppenderQueueByteLimit = 32 * 1024 * 1024; + + @Config(key = "ratis.log.purge.gap", + type = ConfigType.INT, + defaultValue = "1000000", + tags = {SCM, OZONE, HA, RATIS}, + description = "The minimum gap between log indices for Raft server to" + + " purge its log segments after taking snapshot." + ) + private int raftLogPurgeGap = 1000000; + + @Config(key = "ratis.request.timeout", + type = ConfigType.TIME, + defaultValue = "3000ms", + tags = {SCM, OZONE, HA, RATIS}, + description = "The timeout duration for SCM's Ratis server RPC." + ) + private long ratisRequestTimeout = 3000L; + + @Config(key = "ratis.server.retry.cache.timeout", + type = ConfigType.TIME, + defaultValue = "60s", + tags = {SCM, OZONE, HA, RATIS}, + description = "Retry Cache entry timeout for SCM's ratis server." + ) + private long ratisRetryCacheTimeout = 60 * 1000L; + + + @Config(key = "ratis.leader.election.timeout", + type = ConfigType.TIME, + defaultValue = "1s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The minimum timeout duration for SCM ratis leader" + + " election. Default is 1s." + ) + private long ratisLeaderElectionTimeout = 1 * 1000L; + + @Config(key = "ratis.server.failure.timeout.duration", + type = ConfigType.TIME, + defaultValue = "120s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The timeout duration for ratis server failure" + + " detection, once the threshold has reached, the ratis state" + + " machine will be informed about the failure in the ratis ring." + ) + private long ratisNodeFailureTimeout = 120 * 1000L; + + @Config(key = "ratis.server.role.check.interval", + type = ConfigType.TIME, + defaultValue = "15s", + tags = {SCM, OZONE, HA, RATIS}, + description = "The interval between SCM leader performing a role" + + " check on its ratis server. Ratis server informs SCM if it loses" + + " the leader role. The scheduled check is an secondary check to" + + " ensure that the leader role is updated periodically" + ) + private long ratisRoleCheckerInterval = 15 * 1000L; + + public String getRatisStorageDir() { + return ratisStorageDir; + } + + public InetSocketAddress getRatisBindAddress() { + return NetUtils.createSocketAddr(ratisBindHost, ratisBindPort); + } + + public String getRatisRpcType() { + return ratisRpcType; + } + + public long getRaftSegmentSize() { + return raftSegmentSize; + } + + public long getRaftSegmentPreAllocatedSize() { + return raftSegmentPreAllocatedSize; + } + + public int getRaftLogAppenderQueueNum() { + return raftLogAppenderQueueNum; + } + + public int getRaftLogAppenderQueueByteLimit() { + return raftLogAppenderQueueByteLimit; + } + + public int getRaftLogPurgeGap() { + return raftLogPurgeGap; + } + + public long getRatisRetryCacheTimeout() { + return ratisRetryCacheTimeout; + } + + public long getRatisRequestTimeout() { + Preconditions.checkArgument(ratisRequestTimeout > 1000L, + "Ratis request timeout cannot be less than 1000ms."); + return ratisRequestTimeout; + } + + public long getRatisRequestMinTimeout() { + return ratisRequestTimeout - 1000L; + } + + public long getRatisRequestMaxTimeout() { + return ratisRequestTimeout + 1000L; + } + + public long getRatisLeaderElectionTimeout() { + return ratisLeaderElectionTimeout; + } + + public long getRatisNodeFailureTimeout() { + return ratisNodeFailureTimeout; + } + + public long getRatisRoleCheckerInterval() { + return ratisRoleCheckerInterval; + } +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java new file mode 100644 index 000000000000..c78c6161ac9c --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; + +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.hadoop.hdds.scm.metadata.Replicate; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * InvocationHandler which checks for {@link Replicate} annotation and + * dispatches the request to Ratis Server. + */ +public class SCMHAInvocationHandler implements InvocationHandler { + + + private static final Logger LOG = LoggerFactory + .getLogger(SCMHAInvocationHandler.class); + + private final RequestType requestType; + private final Object localHandler; + private final SCMRatisServer ratisHandler; + + /** + * TODO. + */ + public SCMHAInvocationHandler(final RequestType requestType, + final Object localHandler, + final SCMRatisServer ratisHandler) { + this.requestType = requestType; + this.localHandler = localHandler; + this.ratisHandler = ratisHandler; + } + + @Override + public Object invoke(final Object proxy, final Method method, + final Object[] args) throws Throwable { + try { + long startTime = Time.monotonicNow(); + final Object result = method.isAnnotationPresent(Replicate.class) ? + invokeRatis(method, args) : invokeLocal(method, args); + LOG.debug("Call: {} took {} ms", method, Time.monotonicNow() - startTime); + return result; + } catch(InvocationTargetException iEx) { + throw iEx.getCause(); + } + } + + /** + * TODO. + */ + private Object invokeLocal(Method method, Object[] args) + throws InvocationTargetException, IllegalAccessException { + LOG.trace("Invoking method {} on target {}", method, localHandler); + return method.invoke(method, args); + } + + /** + * TODO. + */ + private Object invokeRatis(Method method, Object[] args) + throws Exception { + LOG.trace("Invoking method {} on target {}", method, ratisHandler); + final SCMRatisResponse response = ratisHandler.submitRequest( + SCMRatisRequest.of(requestType, method.getName(), args)); + if (response.isSuccess()) { + return response.getResult(); + } + // Should we unwrap and throw proper exception from here? + throw response.getException(); + } + +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java new file mode 100644 index 000000000000..b38fc4365b8c --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; + +import java.io.IOException; + +/** + * SCMHAManager provides HA service for SCM. + * + * It uses Apache Ratis for HA implementation. We will have a 2N+1 + * node Ratis ring. The Ratis ring will have one Leader node and 2N follower + * nodes. + * + * TODO + * + */ +public class SCMHAManager { + + private static boolean isLeader = true; + + private final SCMRatisServer ratisServer; + + /** + * Creates SCMHAManager instance. + */ + public SCMHAManager(final ConfigurationSource conf) throws IOException { + this.ratisServer = new SCMRatisServer( + conf.getObject(SCMHAConfiguration.class), conf); + } + + /** + * Starts HA service. + */ + public void start() throws IOException { + ratisServer.start(); + } + + /** + * Returns true if the current SCM is the leader. + */ + public static boolean isLeader() { + return isLeader; + } + + /** + * Returns RatisServer instance associated with the SCM instance. + */ + public SCMRatisServer getRatisServer() { + return ratisServer; + } + + /** + * Stops the HA service. + */ + public void shutdown() throws IOException { + ratisServer.stop(); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java new file mode 100644 index 000000000000..d65c23502b58 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.List; + +import com.google.common.primitives.Ints; +import com.google.protobuf.GeneratedMessage; +import com.google.protobuf.InvalidProtocolBufferException; + +import com.google.protobuf.ByteString; +import com.google.protobuf.ProtocolMessageEnum; + +import org.apache.ratis.protocol.Message; + +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.Method; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.MethodArgument; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisRequestProto; + + +/** + * Represents the request that is sent to RatisServer. + */ +public final class SCMRatisRequest { + + private final RequestType type; + private final String operation; + private final Object[] arguments; + + private SCMRatisRequest(final RequestType type, final String operation, + final Object... arguments) { + this.type = type; + this.operation = operation; + this.arguments = arguments; + } + + public static SCMRatisRequest of(final RequestType type, + final String operation, + final Object... arguments) { + return new SCMRatisRequest(type, operation, arguments); + } + + /** + * Returns the type of request. + */ + public RequestType getType() { + return type; + } + + /** + * Returns the operation that this request represents. + */ + public String getOperation() { + return operation; + } + + /** + * Returns the arguments encoded in the request. + */ + public Object[] getArguments() { + return arguments.clone(); + } + + /** + * Encodes the request into Ratis Message. + */ + public Message encode() throws InvalidProtocolBufferException { + final SCMRatisRequestProto.Builder requestProtoBuilder = + SCMRatisRequestProto.newBuilder(); + requestProtoBuilder.setType(type); + + final Method.Builder methodBuilder = Method.newBuilder(); + methodBuilder.setName(operation); + + final List args = new ArrayList<>(); + for (Object argument : arguments) { + final MethodArgument.Builder argBuilder = MethodArgument.newBuilder(); + argBuilder.setType(argument.getClass().getCanonicalName()); + if (argument instanceof GeneratedMessage) { + argBuilder.setValue(((GeneratedMessage) argument).toByteString()); + } else if (argument instanceof ProtocolMessageEnum) { + argBuilder.setValue(ByteString.copyFrom(Ints.toByteArray( + ((ProtocolMessageEnum) argument).getNumber()))); + } else { + throw new InvalidProtocolBufferException(argument.getClass() + + " is not a protobuf object!"); + } + args.add(argBuilder.build()); + } + methodBuilder.addAllArgs(args); + return Message.valueOf( + org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom( + requestProtoBuilder.build().toByteArray())); + } + + /** + * Decodes the request from Ratis Message. + */ + public static SCMRatisRequest decode(Message message) + throws InvalidProtocolBufferException { + final SCMRatisRequestProto requestProto = + SCMRatisRequestProto.parseFrom(message.getContent().toByteArray()); + final Method method = requestProto.getMethod(); + List args = new ArrayList<>(); + for (MethodArgument argument : method.getArgsList()) { + try { + final Class clazz = ReflectionUtil.getClass(argument.getType()); + if (GeneratedMessage.class.isAssignableFrom(clazz)) { + args.add(ReflectionUtil.getMethod(clazz, "parseFrom", byte[].class) + .invoke(null, (Object) argument.getValue().toByteArray())); + } else if (Enum.class.isAssignableFrom(clazz)) { + args.add(ReflectionUtil.getMethod(clazz, "valueOf", int.class) + .invoke(null, Ints.fromByteArray( + argument.getValue().toByteArray()))); + } else { + throw new InvalidProtocolBufferException(argument.getType() + + " is not a protobuf object!"); + } + } catch (ClassNotFoundException | NoSuchMethodException | + IllegalAccessException | InvocationTargetException ex) { + throw new InvalidProtocolBufferException(argument.getType() + + " cannot be decoded!" + ex.getMessage()); + } + } + return new SCMRatisRequest(requestProto.getType(), + method.getName(), args.toArray()); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java new file mode 100644 index 000000000000..c4bedcc0e4c2 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.lang.reflect.InvocationTargetException; +import java.math.BigInteger; + +import com.google.protobuf.ByteString; +import com.google.protobuf.GeneratedMessage; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.ProtocolMessageEnum; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.SCMRatisResponseProto; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientReply; + +/** + * Represents the response from RatisServer. + */ +public final class SCMRatisResponse { + + private final boolean success; + private final Object result; + private final Exception exception; + + private SCMRatisResponse(final Object result) { + this(true, result, null); + } + + private SCMRatisResponse(final Exception exception) { + this(false, null, exception); + } + + private SCMRatisResponse(final boolean success, final Object result, + final Exception exception) { + this.success = success; + this.result = result; + this.exception = exception; + } + + public boolean isSuccess() { + return success; + } + + public Object getResult() { + return result; + } + + public Exception getException() { + return exception; + } + + public static Message encode(final Object result) + throws InvalidProtocolBufferException { + + final ByteString value; + if (result instanceof GeneratedMessage) { + value = ((GeneratedMessage) result).toByteString(); + } else if (result instanceof ProtocolMessageEnum) { + value = ByteString.copyFrom(BigInteger.valueOf( + ((ProtocolMessageEnum) result).getNumber()).toByteArray()); + } else { + throw new InvalidProtocolBufferException(result.getClass() + + " is not a protobuf object!"); + } + + final SCMRatisResponseProto response = + SCMRatisResponseProto.newBuilder() + .setType(result.getClass().getCanonicalName()) + .setValue(value) + .build(); + return Message.valueOf( + org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom( + response.toByteArray())); + } + + public static SCMRatisResponse decode(RaftClientReply reply) + throws InvalidProtocolBufferException { + return reply.isSuccess() ? + new SCMRatisResponse( + deserializeResult(reply.getMessage().getContent().toByteArray())) : + new SCMRatisResponse(reply.getException()); + } + + private static Object deserializeResult(byte[] response) + throws InvalidProtocolBufferException { + final SCMRatisResponseProto responseProto = + SCMRatisResponseProto.parseFrom(response); + try { + final Class clazz = ReflectionUtil.getClass(responseProto.getType()); + if (GeneratedMessage.class.isAssignableFrom(clazz)) { + return ReflectionUtil.getMethod(clazz, "parseFrom", byte[].class) + .invoke(null, (Object) responseProto.getValue().toByteArray()); + } + + if (Enum.class.isAssignableFrom(clazz)) { + return ReflectionUtil.getMethod(clazz, "valueOf", int.class) + .invoke(null, new BigInteger( + responseProto.getValue().toByteArray()).intValue()); + } + + throw new InvalidProtocolBufferException(responseProto.getType() + + " is not a protobuf object!"); + + } catch (ClassNotFoundException | NoSuchMethodException | + IllegalAccessException | InvocationTargetException ex) { + throw new InvalidProtocolBufferException(responseProto.getType() + + " cannot be decoded!" + ex.getMessage()); + } + + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java new file mode 100644 index 000000000000..209535d14a8f --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -0,0 +1,109 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; + +/** + * TODO. + */ +public class SCMRatisServer { + + private final InetSocketAddress address; + private final RaftServer server; + private final RaftGroupId raftGroupId; + private final RaftGroup raftGroup; + private final RaftPeerId raftPeerId; + private final SCMStateMachine scmStateMachine; + private final ClientId clientId = ClientId.randomId(); + private final AtomicLong callId = new AtomicLong(); + + + // TODO: Refactor and remove ConfigurationSource and use only + // SCMHAConfiguration. + SCMRatisServer(final SCMHAConfiguration haConf, + final ConfigurationSource conf) + throws IOException { + final String scmServiceId = "SCM-HA-Service"; + final String scmNodeId = "localhost"; + this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId); + this.address = haConf.getRatisBindAddress(); + final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address); + final List raftPeers = new ArrayList<>(); + raftPeers.add(localRaftPeer); + final RaftProperties serverProperties = RatisUtil + .newRaftProperties(haConf, conf); + this.raftGroupId = RaftGroupId.valueOf( + UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8))); + this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); + this.scmStateMachine = new SCMStateMachine(); + this.server = RaftServer.newBuilder() + .setServerId(raftPeerId) + .setGroup(raftGroup) + .setProperties(serverProperties) + .setStateMachine(scmStateMachine) + .build(); + } + + void start() throws IOException { + server.start(); + } + + public void registerStateMachineHandler(final RequestType handlerType, + final Object handler) { + scmStateMachine.registerHandler(handlerType, handler); + } + + SCMRatisResponse submitRequest(SCMRatisRequest request) + throws IOException, ExecutionException, InterruptedException { + final RaftClientRequest raftClientRequest = new RaftClientRequest( + clientId, server.getId(), raftGroupId, nextCallId(), request.encode(), + RaftClientRequest.writeRequestType(), null); + final RaftClientReply raftClientReply = + server.submitClientRequestAsync(raftClientRequest).get(); + return SCMRatisResponse.decode(raftClientReply); + } + + private long nextCallId() { + return callId.getAndIncrement() & Long.MAX_VALUE; + } + + void stop() throws IOException { + server.close(); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java new file mode 100644 index 000000000000..b10dd549587e --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CompletableFuture; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.statemachine.TransactionContext; +import org.apache.ratis.statemachine.impl.BaseStateMachine; + +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; + +/** + * TODO. + */ +public class SCMStateMachine extends BaseStateMachine { + + private final Map handlers; + + public SCMStateMachine() { + this.handlers = new EnumMap<>(RequestType.class); + } + + public void registerHandler(RequestType type, Object handler) { + handlers.put(type, handler); + } + + @Override + public CompletableFuture applyTransaction( + final TransactionContext trx) { + final CompletableFuture applyTransactionFuture = + new CompletableFuture<>(); + try { + final SCMRatisRequest request = SCMRatisRequest.decode( + trx.getClientRequest().getMessage()); + applyTransactionFuture.complete(process(request)); + } catch (Exception ex) { + applyTransactionFuture.completeExceptionally(ex); + } + return applyTransactionFuture; + } + + private Message process(final SCMRatisRequest request) + throws Exception { + try { + final Object handler = handlers.get(request.getType()); + + if (handler == null) { + throw new IOException("No handler found for request type " + + request.getType()); + } + + final List> argumentTypes = new ArrayList<>(); + for(Object args : request.getArguments()) { + argumentTypes.add(args.getClass()); + } + final Object result = handler.getClass().getMethod( + request.getOperation(), argumentTypes.toArray(new Class[0])) + .invoke(handler, request.getArguments()); + + return SCMRatisResponse.encode(result); + } catch (NoSuchMethodException | SecurityException ex) { + throw new InvalidProtocolBufferException(ex.getMessage()); + } catch (InvocationTargetException e) { + final Exception targetEx = (Exception) e.getTargetException(); + throw targetEx != null ? targetEx : e; + } + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java new file mode 100644 index 000000000000..aeed57cd4a52 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/Replicate.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.metadata; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Inherited; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * TODO: Add javadoc. + */ +@Inherited +@Target(ElementType.METHOD) +@Retention(RetentionPolicy.RUNTIME) +public @interface Replicate { +} diff --git a/hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto b/hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto new file mode 100644 index 000000000000..1107016fcd09 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +option java_package = "org.apache.hadoop.hdds.protocol.proto"; +option java_outer_classname = "SCMRatisProtocol"; +option java_generate_equals_and_hash = true; + +enum RequestType { + PIPELINE = 1; + CONTAINER = 2; +} + +message Method { + required string name = 1; + repeated MethodArgument args = 2; +} + +message MethodArgument { + required string type = 1; + required bytes value = 2; +} + +message SCMRatisRequestProto { + required RequestType type = 1; + required Method method = 2; +} + +message SCMRatisResponseProto { + required string type = 2; + required bytes value = 3; +} From 30e1751383512dd834e4eca32b56cf1b16e706d9 Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Thu, 28 May 2020 14:38:58 +0530 Subject: [PATCH 06/51] HDDS-3192. Handle AllocateContainer operation for HA. (#975) --- .../scm/container/ContainerManagerImpl.java | 19 +- .../scm/container/ContainerManagerV2.java | 5 +- .../container/ContainerStateManagerImpl.java | 140 +++++------ .../container/ContainerStateManagerV2.java | 76 +++++- .../container/states/ContainerStateMap.java | 1 + .../hdds/scm/ha/SCMHAInvocationHandler.java | 6 +- .../hadoop/hdds/scm/ha/SCMHAManager.java | 40 +-- .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 78 ++++++ .../hadoop/hdds/scm/ha/SCMRatisResponse.java | 8 + .../hadoop/hdds/scm/ha/SCMRatisServer.java | 85 +------ .../hdds/scm/ha/SCMRatisServerImpl.java | 113 +++++++++ .../container/TestContainerManagerImpl.java | 91 +++++++ .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 148 ++++++++++++ .../scm/pipeline/MockPipelineManager.java | 228 ++++++++++++++++++ 14 files changed, 823 insertions(+), 215 deletions(-) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 0404530b2f1f..36b9a308e8fe 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -47,6 +47,10 @@ */ public class ContainerManagerImpl implements ContainerManagerV2 { + /* + * TODO: Introduce container level locks. + */ + /** * */ @@ -72,17 +76,18 @@ public class ContainerManagerImpl implements ContainerManagerV2 { * */ public ContainerManagerImpl( - // Introduce builder for this class? - final Configuration conf, final PipelineManager pipelineManager, - final SCMHAManager scmhaManager, + final Configuration conf, + final SCMHAManager scmHaManager, + final PipelineManager pipelineManager, final Table containerStore) throws IOException { + // Introduce builder for this class? this.lock = new ReentrantReadWriteLock(); this.pipelineManager = pipelineManager; - this.containerStateManager = ContainerStateManagerImpl.newBuilder() + this.containerStateManager = ContainerStateManagerImpl.newBuilder() .setConfiguration(conf) .setPipelineManager(pipelineManager) - .setRatisServer(scmhaManager.getRatisServer()) + .setRatisServer(scmHaManager.getRatisServer()) .setContainerStore(containerStore) .build(); } @@ -275,8 +280,8 @@ public void notifyContainerReportProcessing(final boolean isFullReport, } @Override - public void close() throws IOException { - throw new UnsupportedOperationException("Not yet implemented!"); + public void close() throws Exception { + containerStateManager.close(); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java index 37c7b709d458..863ca4da66b9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.hdds.scm.container; -import java.io.Closeable; import java.io.IOException; import java.util.Collections; import java.util.List; @@ -36,8 +35,8 @@ * mapping. This is used by SCM when allocating new locations and when * looking up a key. */ -public interface ContainerManagerV2 extends Closeable { - +public interface ContainerManagerV2 extends AutoCloseable { + // TODO: Rename this to ContainerManager /** * Returns all the container Ids managed by ContainerManager. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 16fe3407bde4..4f4456ace47d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.hdds.scm.container; import java.io.IOException; - import java.lang.reflect.Proxy; import java.util.HashSet; import java.util.Map; @@ -27,128 +26,92 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicLong; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; -import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; -import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; -import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.hdds.utils.db.Table.KeyValue; -import org.apache.hadoop.hdds.utils.db.TableIterator; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.states.ContainerState; import org.apache.hadoop.hdds.scm.container.states.ContainerStateMap; +import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.Table.KeyValue; +import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.common.statemachine.StateMachine; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + /** - * TODO: Add javadoc. + * Default implementation of ContainerStateManager. This implementation + * holds the Container States in-memory which is backed by a persistent store. + * The persistent store is always kept in sync with the in-memory state changes. */ public final class ContainerStateManagerImpl implements ContainerStateManagerV2 { - /* ********************************************************************** - * Container Life Cycle * - * * - * Event and State Transition Mapping: * - * * - * State: OPEN ----------------> CLOSING * - * Event: FINALIZE * - * * - * State: CLOSING ----------------> QUASI_CLOSED * - * Event: QUASI_CLOSE * - * * - * State: CLOSING ----------------> CLOSED * - * Event: CLOSE * - * * - * State: QUASI_CLOSED ----------------> CLOSED * - * Event: FORCE_CLOSE * - * * - * State: CLOSED ----------------> DELETING * - * Event: DELETE * - * * - * State: DELETING ----------------> DELETED * - * Event: CLEANUP * - * * - * * - * Container State Flow: * - * * - * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED] * - * (FINALIZE) | (QUASI_CLOSE) | * - * | | * - * | | * - * (CLOSE) | (FORCE_CLOSE) | * - * | | * - * | | * - * +--------->[CLOSED]<--------+ * - * | * - * (DELETE)| * - * | * - * | * - * [DELETING] * - * | * - * (CLEANUP) | * - * | * - * V * - * [DELETED] * - * * - ************************************************************************/ - /** - * + * Logger instance of ContainerStateManagerImpl. */ private static final Logger LOG = LoggerFactory.getLogger( ContainerStateManagerImpl.class); /** - * + * Configured container size. */ private final long containerSize; /** - * + * The container ID sequence which is used to create new container. + * This will be removed once we have a Distributed Sequence ID Generator. */ + @Deprecated private final AtomicLong nextContainerID; /** - * + * In-memory representation of Container States. */ private final ContainerStateMap containers; /** - * + * Persistent store for Container States. */ - private final PipelineManager pipelineManager; + private Table containerStore; /** - * + * PipelineManager instance. */ - private Table containerStore; + private final PipelineManager pipelineManager; /** - * + * Container lifecycle state machine. */ private final StateMachine stateMachine; /** - * + * We use the containers in round-robin fashion for operations like block + * allocation. This map is used for remembering the last used container. */ private final ConcurrentHashMap lastUsedMap; /** + * constructs ContainerStateManagerImpl instance and loads the containers + * form the persistent storage. * + * @param conf the Configuration + * @param pipelineManager the {@link PipelineManager} instance + * @param containerStore the persistent storage + * @throws IOException in case of error while loading the containers */ private ContainerStateManagerImpl(final Configuration conf, final PipelineManager pipelineManager, @@ -158,7 +121,7 @@ private ContainerStateManagerImpl(final Configuration conf, this.containerStore = containerStore; this.stateMachine = newStateMachine(); this.containerSize = getConfiguredContainerSize(conf); - this.nextContainerID = new AtomicLong(); + this.nextContainerID = new AtomicLong(1L); this.containers = new ContainerStateMap(); this.lastUsedMap = new ConcurrentHashMap<>(); @@ -166,7 +129,9 @@ private ContainerStateManagerImpl(final Configuration conf, } /** + * Creates and initializes a new Container Lifecycle StateMachine. * + * @return the Container Lifecycle StateMachine */ private StateMachine newStateMachine() { @@ -208,7 +173,9 @@ private StateMachine newStateMachine() { } /** + * Returns the configured container size. * + * @return the max size of container */ private long getConfiguredContainerSize(final Configuration conf) { return (long) conf.getStorageSize( @@ -218,7 +185,9 @@ private long getConfiguredContainerSize(final Configuration conf) { } /** + * Loads the containers from container store into memory. * + * @throws IOException in case of error while loading the containers */ private void initialize() throws IOException { TableIterator> @@ -282,16 +251,20 @@ public void addContainer(final ContainerInfoProto containerInfo) Preconditions.checkNotNull(containerInfo); final ContainerInfo container = ContainerInfo.fromProtobuf(containerInfo); - if (getContainer(container.containerID()) == null) { - Preconditions.checkArgument(nextContainerID.get() - == container.containerID().getId(), - "ContainerID mismatch."); - - pipelineManager.addContainerToPipeline( - container.getPipelineID(), container.containerID()); - containers.addContainer(container); - nextContainerID.incrementAndGet(); - } + final ContainerID containerID = container.containerID(); + final PipelineID pipelineID = container.getPipelineID(); + + /* + * TODO: + * Check if the container already exist in in ContainerStateManager. + * This optimization can be done after moving ContainerNotFoundException + * from ContainerStateMap to ContainerManagerImpl. + */ + + containerStore.put(containerID, container); + containers.addContainer(container); + pipelineManager.addContainerToPipeline(pipelineID, containerID); + nextContainerID.incrementAndGet(); } void updateContainerState(final ContainerID containerID, @@ -337,7 +310,9 @@ void removeContainer(final ContainerID containerID) throw new UnsupportedOperationException("Not yet implemented!"); } - void close() throws IOException { + @Override + public void close() throws Exception { + containerStore.close(); } public static Builder newBuilder() { @@ -382,7 +357,6 @@ public ContainerStateManagerV2 build() throws IOException { final ContainerStateManagerV2 csm = new ContainerStateManagerImpl( conf, pipelineMgr, table); - scmRatisServer.registerStateMachineHandler(RequestType.CONTAINER, csm); final SCMHAInvocationHandler invocationHandler = new SCMHAInvocationHandler(RequestType.CONTAINER, csm, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java index 9960354be402..3520b0146e23 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java @@ -17,20 +17,26 @@ package org.apache.hadoop.hdds.scm.container; +import java.io.IOException; +import java.util.Set; + import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.metadata.Replicate; -import java.io.IOException; -import java.util.Set; - /** + * A ContainerStateManager is responsible for keeping track of all the + * container and its state inside SCM, it also exposes methods to read and + * modify the container and its state. * - * TODO: Add proper javadoc. + * All the mutation operations are marked with {@link Replicate} annotation so + * that when SCM-HA is enabled, the mutations are replicated from leader SCM + * to the followers. * - * Implementation of methods marked with {@code @Replicate} annotation should be + * When a method is marked with {@link Replicate} annotation it should follow + * the below rules. * - * 1. Idempotent + * 1. The method call should be Idempotent * 2. Arguments should be of protobuf objects * 3. Return type should be of protobuf object * 4. The declaration should throw RaftException @@ -38,13 +44,65 @@ */ public interface ContainerStateManagerV2 { + //TODO: Rename this to ContainerStateManager + + /* ********************************************************************** + * Container Life Cycle * + * * + * Event and State Transition Mapping: * + * * + * State: OPEN ----------------> CLOSING * + * Event: FINALIZE * + * * + * State: CLOSING ----------------> QUASI_CLOSED * + * Event: QUASI_CLOSE * + * * + * State: CLOSING ----------------> CLOSED * + * Event: CLOSE * + * * + * State: QUASI_CLOSED ----------------> CLOSED * + * Event: FORCE_CLOSE * + * * + * State: CLOSED ----------------> DELETING * + * Event: DELETE * + * * + * State: DELETING ----------------> DELETED * + * Event: CLEANUP * + * * + * * + * Container State Flow: * + * * + * [OPEN]--------------->[CLOSING]--------------->[QUASI_CLOSED] * + * (FINALIZE) | (QUASI_CLOSE) | * + * | | * + * | | * + * (CLOSE) | (FORCE_CLOSE) | * + * | | * + * | | * + * +--------->[CLOSED]<--------+ * + * | * + * (DELETE)| * + * | * + * | * + * [DELETING] * + * | * + * (CLEANUP) | * + * | * + * V * + * [DELETED] * + * * + ************************************************************************/ + /** - * + * Returns a new container ID which can be used for allocating a new + * container. */ ContainerID getNextContainerID(); /** + * Returns the ID of all the managed containers. * + * @return Set of {@link ContainerID} */ Set getContainerIDs(); @@ -72,4 +130,8 @@ Set getContainerReplicas(ContainerID containerID) void addContainer(ContainerInfoProto containerInfo) throws IOException; + /** + * + */ + void close() throws Exception; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index 8cef966995eb..d71049b7052e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -535,6 +535,7 @@ private void flushCache(final ContainerInfo... containerInfos) { } } + // TODO: Move container not found exception to upper layer. private void checkIfContainerExist(ContainerID containerID) throws ContainerNotFoundException { if (!containerMap.containsKey(containerID)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java index c78c6161ac9c..cbe2ce38ef41 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAInvocationHandler.java @@ -50,6 +50,7 @@ public SCMHAInvocationHandler(final RequestType requestType, this.requestType = requestType; this.localHandler = localHandler; this.ratisHandler = ratisHandler; + ratisHandler.registerStateMachineHandler(requestType, localHandler); } @Override @@ -71,8 +72,9 @@ public Object invoke(final Object proxy, final Method method, */ private Object invokeLocal(Method method, Object[] args) throws InvocationTargetException, IllegalAccessException { - LOG.trace("Invoking method {} on target {}", method, localHandler); - return method.invoke(method, args); + LOG.trace("Invoking method {} on target {} with arguments {}", + method, localHandler, args); + return method.invoke(localHandler, args); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index b38fc4365b8c..eb6c8006c5e3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -17,60 +17,30 @@ package org.apache.hadoop.hdds.scm.ha; -import org.apache.hadoop.hdds.conf.ConfigurationSource; - import java.io.IOException; /** * SCMHAManager provides HA service for SCM. - * - * It uses Apache Ratis for HA implementation. We will have a 2N+1 - * node Ratis ring. The Ratis ring will have one Leader node and 2N follower - * nodes. - * - * TODO - * */ -public class SCMHAManager { - - private static boolean isLeader = true; - - private final SCMRatisServer ratisServer; - - /** - * Creates SCMHAManager instance. - */ - public SCMHAManager(final ConfigurationSource conf) throws IOException { - this.ratisServer = new SCMRatisServer( - conf.getObject(SCMHAConfiguration.class), conf); - } +public interface SCMHAManager { /** * Starts HA service. */ - public void start() throws IOException { - ratisServer.start(); - } + void start() throws IOException; /** * Returns true if the current SCM is the leader. */ - public static boolean isLeader() { - return isLeader; - } + boolean isLeader(); /** * Returns RatisServer instance associated with the SCM instance. */ - public SCMRatisServer getRatisServer() { - return ratisServer; - } + SCMRatisServer getRatisServer(); /** * Stops the HA service. */ - public void shutdown() throws IOException { - ratisServer.stop(); - } - + void shutdown() throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java new file mode 100644 index 000000000000..89ac714993c7 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; + +import java.io.IOException; + +/** + * SCMHAManagerImpl uses Apache Ratis for HA implementation. We will have 2N+1 + * node Ratis ring. The Ratis ring will have one Leader node and 2N follower + * nodes. + * + * TODO + * + */ +public class SCMHAManagerImpl implements SCMHAManager { + + private static boolean isLeader = true; + + private final SCMRatisServerImpl ratisServer; + + /** + * Creates SCMHAManager instance. + */ + public SCMHAManagerImpl(final ConfigurationSource conf) throws IOException { + this.ratisServer = new SCMRatisServerImpl( + conf.getObject(SCMHAConfiguration.class), conf); + } + + /** + * {@inheritDoc} + */ + @Override + public void start() throws IOException { + ratisServer.start(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isLeader() { + return isLeader; + } + + /** + * {@inheritDoc} + */ + @Override + public SCMRatisServer getRatisServer() { + return ratisServer; + } + + /** + * {@inheritDoc} + */ + @Override + public void shutdown() throws IOException { + ratisServer.stop(); + } + +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java index c4bedcc0e4c2..21ca4be50841 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java @@ -67,6 +67,10 @@ public Exception getException() { public static Message encode(final Object result) throws InvalidProtocolBufferException { + if (result == null) { + return Message.EMPTY; + } + final ByteString value; if (result instanceof GeneratedMessage) { value = ((GeneratedMessage) result).toByteString(); @@ -98,6 +102,10 @@ public static SCMRatisResponse decode(RaftClientReply reply) private static Object deserializeResult(byte[] response) throws InvalidProtocolBufferException { + if (response.length == 0) { + return null; + } + final SCMRatisResponseProto responseProto = SCMRatisResponseProto.parseFrom(response); try { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java index 209535d14a8f..4ddbc7b63ac9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -17,93 +17,22 @@ package org.apache.hadoop.hdds.scm.ha; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; + import java.io.IOException; -import java.net.InetSocketAddress; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.List; -import java.util.UUID; import java.util.concurrent.ExecutionException; -import java.util.concurrent.atomic.AtomicLong; - -import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; -import org.apache.ratis.conf.RaftProperties; -import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftClientRequest; -import org.apache.ratis.protocol.RaftGroup; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.server.RaftServer; /** * TODO. */ -public class SCMRatisServer { - - private final InetSocketAddress address; - private final RaftServer server; - private final RaftGroupId raftGroupId; - private final RaftGroup raftGroup; - private final RaftPeerId raftPeerId; - private final SCMStateMachine scmStateMachine; - private final ClientId clientId = ClientId.randomId(); - private final AtomicLong callId = new AtomicLong(); +public interface SCMRatisServer { + void start() throws IOException; - // TODO: Refactor and remove ConfigurationSource and use only - // SCMHAConfiguration. - SCMRatisServer(final SCMHAConfiguration haConf, - final ConfigurationSource conf) - throws IOException { - final String scmServiceId = "SCM-HA-Service"; - final String scmNodeId = "localhost"; - this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId); - this.address = haConf.getRatisBindAddress(); - final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address); - final List raftPeers = new ArrayList<>(); - raftPeers.add(localRaftPeer); - final RaftProperties serverProperties = RatisUtil - .newRaftProperties(haConf, conf); - this.raftGroupId = RaftGroupId.valueOf( - UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8))); - this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); - this.scmStateMachine = new SCMStateMachine(); - this.server = RaftServer.newBuilder() - .setServerId(raftPeerId) - .setGroup(raftGroup) - .setProperties(serverProperties) - .setStateMachine(scmStateMachine) - .build(); - } - - void start() throws IOException { - server.start(); - } - - public void registerStateMachineHandler(final RequestType handlerType, - final Object handler) { - scmStateMachine.registerHandler(handlerType, handler); - } + void registerStateMachineHandler(RequestType handlerType, Object handler); SCMRatisResponse submitRequest(SCMRatisRequest request) - throws IOException, ExecutionException, InterruptedException { - final RaftClientRequest raftClientRequest = new RaftClientRequest( - clientId, server.getId(), raftGroupId, nextCallId(), request.encode(), - RaftClientRequest.writeRequestType(), null); - final RaftClientReply raftClientReply = - server.submitClientRequestAsync(raftClientRequest).get(); - return SCMRatisResponse.decode(raftClientReply); - } - - private long nextCallId() { - return callId.getAndIncrement() & Long.MAX_VALUE; - } - - void stop() throws IOException { - server.close(); - } + throws IOException, ExecutionException, InterruptedException; + void stop() throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java new file mode 100644 index 000000000000..45ae212ebb66 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -0,0 +1,113 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.io.IOException; +import java.net.InetSocketAddress; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicLong; + +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.ratis.conf.RaftProperties; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftClientRequest; +import org.apache.ratis.protocol.RaftGroup; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; + +/** + * TODO. + */ +public class SCMRatisServerImpl implements SCMRatisServer { + + private final InetSocketAddress address; + private final RaftServer server; + private final RaftGroupId raftGroupId; + private final RaftGroup raftGroup; + private final RaftPeerId raftPeerId; + private final SCMStateMachine scmStateMachine; + private final ClientId clientId = ClientId.randomId(); + private final AtomicLong callId = new AtomicLong(); + + + // TODO: Refactor and remove ConfigurationSource and use only + // SCMHAConfiguration. + SCMRatisServerImpl(final SCMHAConfiguration haConf, + final ConfigurationSource conf) + throws IOException { + final String scmServiceId = "SCM-HA-Service"; + final String scmNodeId = "localhost"; + this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId); + this.address = haConf.getRatisBindAddress(); + final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address); + final List raftPeers = new ArrayList<>(); + raftPeers.add(localRaftPeer); + final RaftProperties serverProperties = RatisUtil + .newRaftProperties(haConf, conf); + this.raftGroupId = RaftGroupId.valueOf( + UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8))); + this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); + this.scmStateMachine = new SCMStateMachine(); + this.server = RaftServer.newBuilder() + .setServerId(raftPeerId) + .setGroup(raftGroup) + .setProperties(serverProperties) + .setStateMachine(scmStateMachine) + .build(); + } + + @Override + public void start() throws IOException { + server.start(); + } + + @Override + public void registerStateMachineHandler(final RequestType handlerType, + final Object handler) { + scmStateMachine.registerHandler(handlerType, handler); + } + + @Override + public SCMRatisResponse submitRequest(SCMRatisRequest request) + throws IOException, ExecutionException, InterruptedException { + final RaftClientRequest raftClientRequest = new RaftClientRequest( + clientId, server.getId(), raftGroupId, nextCallId(), request.encode(), + RaftClientRequest.writeRequestType(), null); + final RaftClientReply raftClientReply = + server.submitClientRequestAsync(raftClientRequest).get(); + return SCMRatisResponse.decode(raftClientReply); + } + + private long nextCallId() { + return callId.getAndIncrement() & Long.MAX_VALUE; + } + + @Override + public void stop() throws IOException { + server.close(); + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java new file mode 100644 index 000000000000..022d3921df0b --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.container; + +import java.io.File; +import java.util.UUID; + +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + + + +/** + * Tests to verify the functionality of ContainerManager. + */ +public class TestContainerManagerImpl { + + private File testDir; + private DBStore dbStore; + private ContainerManagerV2 containerManager; + + @Before + public void setUp() throws Exception { + final OzoneConfiguration conf = SCMTestUtils.getConf(); + testDir = GenericTestUtils.getTestDir( + TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + dbStore = DBStoreBuilder.createDBStore( + conf, new SCMDBDefinition()); + final PipelineManager pipelineManager = MockPipelineManager.getInstance(); + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + containerManager = new ContainerManagerImpl(conf, + MockSCMHAManager.getInstance(), pipelineManager, + SCMDBDefinition.CONTAINERS.getTable(dbStore)); + } + + @After + public void cleanup() throws Exception { + if(containerManager != null) { + containerManager.close(); + } + + if (dbStore != null) { + dbStore.close(); + } + + FileUtil.fullyDelete(testDir); + } + + @Test + public void testAllocateContainer() throws Exception { + Assert.assertTrue(containerManager.getContainerIDs().isEmpty()); + final ContainerInfo container = containerManager.allocateContainer( + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, "admin"); + Assert.assertEquals(1, containerManager.getContainerIDs().size()); + Assert.assertNotNull(containerManager.getContainer( + container.containerID())); + } + +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java new file mode 100644 index 000000000000..c3b14fb405bb --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.util.ArrayList; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.StateMachineException; + +/** + * Mock SCMHAManager implementation for testing. + */ +public final class MockSCMHAManager implements SCMHAManager { + + private final SCMRatisServer ratisServer; + + public static SCMHAManager getInstance() { + return new MockSCMHAManager(); + } + + /** + * Creates MockSCMHAManager instance. + */ + private MockSCMHAManager() { + this.ratisServer = new MockRatisServer(); + } + + @Override + public void start() throws IOException { + ratisServer.start(); + } + + /** + * {@inheritDoc} + */ + @Override + public boolean isLeader() { + return true; + } + + /** + * {@inheritDoc} + */ + @Override + public SCMRatisServer getRatisServer() { + return ratisServer; + } + + /** + * {@inheritDoc} + */ + @Override + public void shutdown() throws IOException { + ratisServer.stop(); + } + + private static class MockRatisServer implements SCMRatisServer { + + private Map handlers = + new EnumMap<>(RequestType.class); + + @Override + public void start() { + } + + @Override + public void registerStateMachineHandler(final RequestType handlerType, + final Object handler) { + handlers.put(handlerType, handler); + } + + @Override + public SCMRatisResponse submitRequest(final SCMRatisRequest request) + throws IOException { + final RaftGroupMemberId raftId = RaftGroupMemberId.valueOf( + RaftPeerId.valueOf("peer"), RaftGroupId.randomId()); + RaftClientReply reply; + try { + final Message result = process(request); + return SCMRatisResponse.decode(new RaftClientReply(ClientId.randomId(), + raftId, 1L, true, result, null, 1L, null)); + } catch (Exception ex) { + return SCMRatisResponse.decode(new RaftClientReply(ClientId.randomId(), + raftId, 1L, false, null, + new StateMachineException(raftId, ex), 1L, null)); + } + } + + private Message process(final SCMRatisRequest request) + throws Exception { + try { + final Object handler = handlers.get(request.getType()); + + if (handler == null) { + throw new IOException("No handler found for request type " + + request.getType()); + } + + final List> argumentTypes = new ArrayList<>(); + for(Object args : request.getArguments()) { + argumentTypes.add(args.getClass()); + } + final Object result = handler.getClass().getMethod( + request.getOperation(), argumentTypes.toArray(new Class[0])) + .invoke(handler, request.getArguments()); + + return SCMRatisResponse.encode(result); + } catch (NoSuchMethodException | SecurityException ex) { + throw new InvalidProtocolBufferException(ex.getMessage()); + } catch (InvocationTargetException e) { + final Exception targetEx = (Exception) e.getTargetException(); + throw targetEx != null ? targetEx : e; + } + } + + @Override + public void stop() { + } + } + +} \ No newline at end of file diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java new file mode 100644 index 000000000000..5dd60824838b --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java @@ -0,0 +1,228 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.server.events.EventPublisher; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +/** + * Mock PipelineManager implementation for testing. + */ +public final class MockPipelineManager implements PipelineManager { + + private PipelineStateManager stateManager; + + public static PipelineManager getInstance() { + return new MockPipelineManager(); + } + + private MockPipelineManager() { + this.stateManager = new PipelineStateManager(); + } + + @Override + public Pipeline createPipeline(final ReplicationType type, + final ReplicationFactor factor) + throws IOException { + final List nodes = Stream.generate( + MockDatanodeDetails::randomDatanodeDetails) + .limit(factor.getNumber()).collect(Collectors.toList()); + final Pipeline pipeline = Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setType(type) + .setFactor(factor) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .build(); + stateManager.addPipeline(pipeline); + return pipeline; + } + + @Override + public Pipeline createPipeline(final ReplicationType type, + final ReplicationFactor factor, + final List nodes) { + return Pipeline.newBuilder() + .setId(PipelineID.randomId()) + .setType(type) + .setFactor(factor) + .setNodes(nodes) + .setState(Pipeline.PipelineState.OPEN) + .build(); + } + + @Override + public Pipeline getPipeline(final PipelineID pipelineID) + throws PipelineNotFoundException { + return stateManager.getPipeline(pipelineID); + } + + @Override + public boolean containsPipeline(final PipelineID pipelineID) { + try { + return stateManager.getPipeline(pipelineID) != null; + } catch (PipelineNotFoundException e) { + return false; + } + } + + @Override + public List getPipelines() { + return stateManager.getPipelines(); + } + + @Override + public List getPipelines(final ReplicationType type) { + return stateManager.getPipelines(type); + } + + @Override + public List getPipelines(final ReplicationType type, + final ReplicationFactor factor) { + return stateManager.getPipelines(type, factor); + } + + @Override + public List getPipelines(final ReplicationType type, + final Pipeline.PipelineState state) { + return stateManager.getPipelines(type, state); + } + + @Override + public List getPipelines(final ReplicationType type, + final ReplicationFactor factor, + final Pipeline.PipelineState state) { + return stateManager.getPipelines(type, factor, state); + } + + @Override + public List getPipelines(final ReplicationType type, + final ReplicationFactor factor, final Pipeline.PipelineState state, + final Collection excludeDns, + final Collection excludePipelines) { + return stateManager.getPipelines(type, factor, state, + excludeDns, excludePipelines); + } + + @Override + public void addContainerToPipeline(final PipelineID pipelineID, + final ContainerID containerID) + throws IOException { + stateManager.addContainerToPipeline(pipelineID, containerID); + } + + @Override + public void removeContainerFromPipeline(final PipelineID pipelineID, + final ContainerID containerID) + throws IOException { + stateManager.removeContainerFromPipeline(pipelineID, containerID); + } + + @Override + public NavigableSet getContainersInPipeline( + final PipelineID pipelineID) throws IOException { + return getContainersInPipeline(pipelineID); + } + + @Override + public int getNumberOfContainers(final PipelineID pipelineID) + throws IOException { + return getContainersInPipeline(pipelineID).size(); + } + + @Override + public void openPipeline(final PipelineID pipelineId) + throws IOException { + stateManager.openPipeline(pipelineId); + } + + @Override + public void finalizeAndDestroyPipeline(final Pipeline pipeline, + final boolean onTimeout) + throws IOException { + stateManager.finalizePipeline(pipeline.getId()); + } + + @Override + public void scrubPipeline(final ReplicationType type, + final ReplicationFactor factor) + throws IOException { + + } + + @Override + public void startPipelineCreator() { + + } + + @Override + public void triggerPipelineCreation() { + + } + + @Override + public void incNumBlocksAllocatedMetric(final PipelineID id) { + + } + + @Override + public void activatePipeline(final PipelineID pipelineID) + throws IOException { + + } + + @Override + public void deactivatePipeline(final PipelineID pipelineID) + throws IOException { + stateManager.deactivatePipeline(pipelineID); + } + + @Override + public boolean getSafeModeStatus() { + return false; + } + + @Override + public void close() throws IOException { + + } + + @Override + public Map getPipelineInfo() { + return null; + } + + @Override + public void onMessage(final SCMSafeModeManager.SafeModeStatus safeModeStatus, + final EventPublisher publisher) { + + } +} \ No newline at end of file From c8367208276b4137261992f3ff395f22e7db34b0 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Tue, 2 Jun 2020 02:18:37 +0800 Subject: [PATCH 07/51] HDDS-3196 New PipelineManager interface to persist to RatisServer. (#980) --- .../scm/pipeline/PipelineManagerV2Impl.java | 617 ++++++++++++++++++ .../scm/pipeline/PipelineStateManagerV2.java | 99 +++ .../pipeline/PipelineStateManagerV2Impl.java | 226 +++++++ .../hdds/scm/pipeline/RatisPipelineUtils.java | 19 + 4 files changed, 961 insertions(+) create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java new file mode 100644 index 000000000000..a6a3249fc7dc --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -0,0 +1,617 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.hadoop.hdds.utils.Scheduler; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.metrics2.util.MBeans; +import org.apache.hadoop.util.Time; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.management.ObjectName; +import java.io.IOException; +import java.time.Duration; +import java.time.Instant; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.stream.Collectors; + +/** + * SCM Pipeline Manager implementation. + * All the write operations for pipelines must come via PipelineManager. + * It synchronises all write and read operations via a ReadWriteLock. + */ +public class PipelineManagerV2Impl implements PipelineManager { + private static final Logger LOG = + LoggerFactory.getLogger(SCMPipelineManager.class); + + private final ReadWriteLock lock; + private PipelineFactory pipelineFactory; + private PipelineStateManagerV2 stateManager; + private Scheduler scheduler; + private BackgroundPipelineCreator backgroundPipelineCreator; + private final ConfigurationSource conf; + // Pipeline Manager MXBean + private ObjectName pmInfoBean; + private final SCMPipelineMetrics metrics; + private long pipelineWaitDefaultTimeout; + private final AtomicBoolean isInSafeMode; + // Used to track if the safemode pre-checks have completed. This is designed + // to prevent pipelines being created until sufficient nodes have registered. + private final AtomicBoolean pipelineCreationAllowed; + + public PipelineManagerV2Impl(ConfigurationSource conf, + NodeManager nodeManager, + PipelineStateManagerV2 pipelineStateManager, + PipelineFactory pipelineFactory) { + this.lock = new ReentrantReadWriteLock(); + this.pipelineFactory = pipelineFactory; + this.stateManager = pipelineStateManager; + this.conf = conf; + this.pmInfoBean = MBeans.register("SCMPipelineManager", + "SCMPipelineManagerInfo", this); + this.metrics = SCMPipelineMetrics.create(); + this.pipelineWaitDefaultTimeout = conf.getTimeDuration( + HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, + HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL_DEFAULT, + TimeUnit.MILLISECONDS); + this.isInSafeMode = new AtomicBoolean(conf.getBoolean( + HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, + HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT)); + // Pipeline creation is only allowed after the safemode prechecks have + // passed, eg sufficient nodes have registered. + this.pipelineCreationAllowed = new AtomicBoolean(!this.isInSafeMode.get()); + } + + public static PipelineManager newPipelineManager( + ConfigurationSource conf, SCMHAManager scmhaManager, + NodeManager nodeManager, Table pipelineStore, + PipelineFactory pipelineFactory) throws IOException { + // Create PipelineStateManager + PipelineStateManagerV2 stateManager = PipelineStateManagerV2Impl + .newBuilder().setPipelineStore(pipelineStore) + .setRatisServer(scmhaManager.getRatisServer()) + .setNodeManager(nodeManager) + .build(); + + // Create PipelineManager + PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf, + nodeManager, stateManager, pipelineFactory); + + // Create background thread. + Scheduler scheduler = new Scheduler( + "RatisPipelineUtilsThread", false, 1); + BackgroundPipelineCreator backgroundPipelineCreator = + new BackgroundPipelineCreator(pipelineManager, scheduler, conf); + pipelineManager.setBackgroundPipelineCreator(backgroundPipelineCreator); + pipelineManager.setScheduler(scheduler); + + return pipelineManager; + } + + @Override + public Pipeline createPipeline(ReplicationType type, + ReplicationFactor factor) throws IOException { + if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) { + LOG.debug("Pipeline creation is not allowed until safe mode prechecks " + + "complete"); + throw new IOException("Pipeline creation is not allowed as safe mode " + + "prechecks have not yet passed"); + } + lock.writeLock().lock(); + try { + Pipeline pipeline = pipelineFactory.create(type, factor); + stateManager.addPipeline(pipeline.getProtobufMessage()); + recordMetricsForPipeline(pipeline); + return pipeline; + } catch (IOException ex) { + LOG.error("Failed to create pipeline of type {} and factor {}. " + + "Exception: {}", type, factor, ex.getMessage()); + metrics.incNumPipelineCreationFailed(); + throw ex; + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, + List nodes) { + return null; + } + + @Override + public Pipeline getPipeline(PipelineID pipelineID) + throws PipelineNotFoundException { + lock.readLock().lock(); + try { + return stateManager.getPipeline(pipelineID); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public boolean containsPipeline(PipelineID pipelineID) { + lock.readLock().lock(); + try { + getPipeline(pipelineID); + return true; + } catch (PipelineNotFoundException e) { + return false; + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines() { + lock.readLock().lock(); + try { + return stateManager.getPipelines(); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines(ReplicationType type) { + try { + return stateManager.getPipelines(type); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines(ReplicationType type, + ReplicationFactor factor) { + lock.readLock().lock(); + try { + return stateManager.getPipelines(type, factor); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines(ReplicationType type, + Pipeline.PipelineState state) { + lock.readLock().lock(); + try { + return stateManager.getPipelines(type, state); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines(ReplicationType type, + ReplicationFactor factor, + Pipeline.PipelineState state) { + lock.readLock().lock(); + try { + return stateManager.getPipelines(type, factor, state); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public List getPipelines( + ReplicationType type, ReplicationFactor factor, + Pipeline.PipelineState state, Collection excludeDns, + Collection excludePipelines) { + lock.readLock().lock(); + try { + return stateManager + .getPipelines(type, factor, state, excludeDns, excludePipelines); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public void addContainerToPipeline( + PipelineID pipelineID, ContainerID containerID) throws IOException { + lock.writeLock().lock(); + try { + stateManager.addContainerToPipeline(pipelineID, containerID); + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public void removeContainerFromPipeline( + PipelineID pipelineID, ContainerID containerID) throws IOException { + lock.writeLock().lock(); + try { + stateManager.removeContainerFromPipeline(pipelineID, containerID); + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public NavigableSet getContainersInPipeline( + PipelineID pipelineID) throws IOException { + lock.readLock().lock(); + try { + return stateManager.getContainers(pipelineID); + } finally { + lock.readLock().unlock(); + } + } + + @Override + public int getNumberOfContainers(PipelineID pipelineID) throws IOException { + return stateManager.getNumberOfContainers(pipelineID); + } + + @Override + public void openPipeline(PipelineID pipelineId) throws IOException { + lock.writeLock().lock(); + try { + Pipeline pipeline = stateManager.getPipeline(pipelineId); + if (pipeline.isClosed()) { + throw new IOException("Closed pipeline can not be opened"); + } + if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { + LOG.info("Pipeline {} moved to OPEN state", pipeline); + stateManager.updatePipelineState(pipelineId.getProtobuf(), + HddsProtos.PipelineState.PIPELINE_OPEN); + } + metrics.incNumPipelineCreated(); + metrics.createPerPipelineMetrics(pipeline); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Finalizes pipeline in the SCM. Removes pipeline and makes rpc call to + * destroy pipeline on the datanodes immediately or after timeout based on the + * value of onTimeout parameter. + * + * @param pipeline - Pipeline to be destroyed + * @param onTimeout - if true pipeline is removed and destroyed on + * datanodes after timeout + * @throws IOException + */ + @Override + public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) + throws IOException { + LOG.info("Destroying pipeline:{}", pipeline); + finalizePipeline(pipeline.getId()); + if (onTimeout) { + long pipelineDestroyTimeoutInMillis = + conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, + ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + scheduler.schedule(() -> destroyPipeline(pipeline), + pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG, + String.format("Destroy pipeline failed for pipeline:%s", pipeline)); + } else { + destroyPipeline(pipeline); + } + } + + /** + * Moves the pipeline to CLOSED state and sends close container command for + * all the containers in the pipeline. + * + * @param pipelineId - ID of the pipeline to be moved to CLOSED state. + * @throws IOException + */ + private void finalizePipeline(PipelineID pipelineId) throws IOException { + lock.writeLock().lock(); + try { + Pipeline pipeline = stateManager.getPipeline(pipelineId); + if (!pipeline.isClosed()) { + stateManager.updatePipelineState(pipelineId.getProtobuf(), + HddsProtos.PipelineState.PIPELINE_CLOSED); + LOG.info("Pipeline {} moved to CLOSED state", pipeline); + } + + // TODO fire events to datanodes for closing pipelines +// Set containerIDs = stateManager.getContainers(pipelineId); +// for (ContainerID containerID : containerIDs) { +// eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); +// } + metrics.removePipelineMetrics(pipelineId); + } finally { + lock.writeLock().unlock(); + } + } + + /** + * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all + * the datanodes for ratis pipelines. + * + * @param pipeline - Pipeline to be destroyed + * @throws IOException + */ + protected void destroyPipeline(Pipeline pipeline) throws IOException { + pipelineFactory.close(pipeline.getType(), pipeline); + // remove the pipeline from the pipeline manager + removePipeline(pipeline.getId()); + triggerPipelineCreation(); + } + + /** + * Removes the pipeline from the db and pipeline state map. + * + * @param pipelineId - ID of the pipeline to be removed + * @throws IOException + */ + protected void removePipeline(PipelineID pipelineId) throws IOException { + lock.writeLock().lock(); + try { + stateManager.removePipeline(pipelineId.getProtobuf()); + metrics.incNumPipelineDestroyed(); + } catch (IOException ex) { + metrics.incNumPipelineDestroyFailed(); + throw ex; + } finally { + lock.writeLock().unlock(); + } + } + + @Override + public void scrubPipeline(ReplicationType type, ReplicationFactor factor) + throws IOException{ + if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) { + // Only srub pipeline for RATIS THREE pipeline + return; + } + Instant currentTime = Instant.now(); + Long pipelineScrubTimeoutInMills = conf.getTimeDuration( + ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, + ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, + TimeUnit.MILLISECONDS); + List needToSrubPipelines = stateManager.getPipelines(type, factor, + Pipeline.PipelineState.ALLOCATED).stream() + .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp() + .toEpochMilli() >= pipelineScrubTimeoutInMills) + .collect(Collectors.toList()); + for (Pipeline p : needToSrubPipelines) { + LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + + " since it stays at ALLOCATED stage for " + + Duration.between(currentTime, p.getCreationTimestamp()).toMinutes() + + " mins."); + finalizeAndDestroyPipeline(p, false); + } + } + + /** + * Schedules a fixed interval job to create pipelines. + */ + @Override + public void startPipelineCreator() { + backgroundPipelineCreator.startFixedIntervalPipelineCreator(); + } + + /** + * Triggers pipeline creation after the specified time. + */ + @Override + public void triggerPipelineCreation() { + backgroundPipelineCreator.triggerPipelineCreation(); + } + + @Override + public void incNumBlocksAllocatedMetric(PipelineID id) { + metrics.incNumBlocksAllocated(id); + } + + /** + * Activates a dormant pipeline. + * + * @param pipelineID ID of the pipeline to activate. + * @throws IOException in case of any Exception + */ + @Override + public void activatePipeline(PipelineID pipelineID) + throws IOException { + stateManager.updatePipelineState(pipelineID.getProtobuf(), + HddsProtos.PipelineState.PIPELINE_OPEN); + } + + /** + * Deactivates an active pipeline. + * + * @param pipelineID ID of the pipeline to deactivate. + * @throws IOException in case of any Exception + */ + @Override + public void deactivatePipeline(PipelineID pipelineID) + throws IOException { + stateManager.updatePipelineState(pipelineID.getProtobuf(), + HddsProtos.PipelineState.PIPELINE_DORMANT); + } + + /** + * Wait a pipeline to be OPEN. + * + * @param pipelineID ID of the pipeline to wait for. + * @param timeout wait timeout, millisecond, 0 to use default value + * @throws IOException in case of any Exception, such as timeout + */ + @Override + public void waitPipelineReady(PipelineID pipelineID, long timeout) + throws IOException { + long st = Time.monotonicNow(); + if (timeout == 0) { + timeout = pipelineWaitDefaultTimeout; + } + + boolean ready; + Pipeline pipeline; + do { + try { + pipeline = stateManager.getPipeline(pipelineID); + } catch (PipelineNotFoundException e) { + throw new PipelineNotFoundException(String.format( + "Pipeline %s cannot be found", pipelineID)); + } + ready = pipeline.isOpen(); + if (!ready) { + try { + Thread.sleep((long)100); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + } while (!ready && Time.monotonicNow() - st < timeout); + + if (!ready) { + throw new IOException(String.format("Pipeline %s is not ready in %d ms", + pipelineID, timeout)); + } + } + + @Override + public Map getPipelineInfo() { + final Map pipelineInfo = new HashMap<>(); + for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) { + pipelineInfo.put(state.toString(), 0); + } + stateManager.getPipelines().forEach(pipeline -> + pipelineInfo.computeIfPresent( + pipeline.getPipelineState().toString(), (k, v) -> v + 1)); + return pipelineInfo; + } + + /** + * Get SafeMode status. + * @return boolean + */ + @Override + public boolean getSafeModeStatus() { + return this.isInSafeMode.get(); + } + + @Override + public void close() throws IOException { + if (scheduler != null) { + scheduler.close(); + scheduler = null; + } + + if(pmInfoBean != null) { + MBeans.unregister(this.pmInfoBean); + pmInfoBean = null; + } + + SCMPipelineMetrics.unRegister(); + + // shutdown pipeline provider. + pipelineFactory.shutdown(); + } + + @Override + public void onMessage(SCMSafeModeManager.SafeModeStatus status, + EventPublisher publisher) { + // TODO: #CLUTIL - handle safemode getting re-enabled + boolean currentAllowPipelines = + pipelineCreationAllowed.getAndSet(status.isPreCheckComplete()); + boolean currentlyInSafeMode = + isInSafeMode.getAndSet(status.isInSafeMode()); + + // Trigger pipeline creation only if the preCheck status has changed to + // complete. + if (isPipelineCreationAllowed() && !currentAllowPipelines) { + triggerPipelineCreation(); + } + // Start the pipeline creation thread only when safemode switches off + if (!getSafeModeStatus() && currentlyInSafeMode) { + startPipelineCreator(); + } + } + + @VisibleForTesting + public boolean isPipelineCreationAllowed() { + return pipelineCreationAllowed.get(); + } + + private void setBackgroundPipelineCreator( + BackgroundPipelineCreator backgroundPipelineCreator) { + this.backgroundPipelineCreator = backgroundPipelineCreator; + } + + private void setScheduler(Scheduler scheduler) { + this.scheduler = scheduler; + } + + private void recordMetricsForPipeline(Pipeline pipeline) { + metrics.incNumPipelineAllocated(); + if (pipeline.isOpen()) { + metrics.incNumPipelineCreated(); + metrics.createPerPipelineMetrics(pipeline); + } + switch (pipeline.getType()) { + case STAND_ALONE: + return; + case RATIS: + List overlapPipelines = RatisPipelineUtils + .checkPipelineContainSameDatanodes(stateManager, pipeline); + if (!overlapPipelines.isEmpty()) { + // Count 1 overlap at a time. + metrics.incNumPipelineContainSameDatanodes(); + //TODO remove until pipeline allocation is proved equally distributed. + for (Pipeline overlapPipeline : overlapPipelines) { + LOG.info("Pipeline: " + pipeline.getId().toString() + + " contains same datanodes as previous pipelines: " + + overlapPipeline.getId().toString() + " nodeIds: " + + pipeline.getNodes().get(0).getUuid().toString() + + ", " + pipeline.getNodes().get(1).getUuid().toString() + + ", " + pipeline.getNodes().get(2).getUuid().toString()); + } + } + return; + case CHAINED: + // Not supported. + default: + // Not supported. + return; + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java new file mode 100644 index 000000000000..402157566c5e --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.metadata.Replicate; + +import java.io.IOException; +import java.util.Collection; +import java.util.List; +import java.util.NavigableSet; + +/** + * Manages the state of pipelines in SCM. + */ +public interface PipelineStateManagerV2 { + + /** + * Adding pipeline would be replicated to Ratis. + * @param pipelineProto + * @throws IOException + */ + @Replicate + void addPipeline(HddsProtos.Pipeline pipelineProto) throws IOException; + + /** + * Removing pipeline would be replicated to Ratis. + * @param pipelineIDProto + * @return Pipeline removed + * @throws IOException + */ + @Replicate + void removePipeline(HddsProtos.PipelineID pipelineIDProto) + throws IOException; + + /** + * Updating pipeline state would be replicated to Ratis. + * @param pipelineIDProto + * @param newState + * @throws IOException + */ + @Replicate + void updatePipelineState(HddsProtos.PipelineID pipelineIDProto, + HddsProtos.PipelineState newState) + throws IOException; + + void addContainerToPipeline(PipelineID pipelineID, + ContainerID containerID) throws IOException; + + Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException; + + List getPipelines(); + + List getPipelines(HddsProtos.ReplicationType type); + + List getPipelines(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor); + + List getPipelines(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, + Pipeline.PipelineState state); + + List getPipelines(HddsProtos.ReplicationType type, + HddsProtos.ReplicationFactor factor, + Pipeline.PipelineState state, + Collection excludeDns, + Collection excludePipelines); + + List getPipelines(HddsProtos.ReplicationType type, + Pipeline.PipelineState... states); + + NavigableSet getContainers(PipelineID pipelineID) + throws IOException; + + int getNumberOfContainers(PipelineID pipelineID) throws IOException; + + + void removeContainerFromPipeline(PipelineID pipelineID, + ContainerID containerID) throws IOException; + + void close() throws Exception; +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java new file mode 100644 index 000000000000..c74dc8678f88 --- /dev/null +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java @@ -0,0 +1,226 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ *

http://www.apache.org/licenses/LICENSE-2.0 + *

+ *

Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.hdds.scm.pipeline; + +import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.ha.SCMHAInvocationHandler; +import org.apache.hadoop.hdds.scm.ha.SCMRatisServer; +import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.lang.reflect.Proxy; +import java.util.Collection; +import java.util.List; +import java.util.NavigableSet; + +/** + * Implementation of pipeline state manager. + * PipelineStateMap class holds the data structures related to pipeline and its + * state. All the read and write operations in PipelineStateMap are protected + * by a read write lock. + */ +public class PipelineStateManagerV2Impl implements PipelineStateManagerV2 { + + private static final Logger LOG = + LoggerFactory.getLogger(PipelineStateManager.class); + + private final PipelineStateMap pipelineStateMap; + private final NodeManager nodeManager; + private Table pipelineStore; + + public PipelineStateManagerV2Impl( + Table pipelineStore, NodeManager nodeManager) + throws IOException { + this.pipelineStateMap = new PipelineStateMap(); + this.nodeManager = nodeManager; + this.pipelineStore = pipelineStore; + initialize(); + } + + private void initialize() throws IOException { + if (pipelineStore == null || nodeManager == null) { + throw new IOException("PipelineStore cannot be null"); + } + if (pipelineStore.isEmpty()) { + LOG.info("No pipeline exists in current db"); + return; + } + TableIterator> + iterator = pipelineStore.iterator(); + while (iterator.hasNext()) { + Pipeline pipeline = iterator.next().getValue(); + addPipeline(pipeline.getProtobufMessage()); + } + } + + @Override + public void addPipeline(HddsProtos.Pipeline pipelineProto) + throws IOException { + Pipeline pipeline = Pipeline.getFromProtobuf(pipelineProto); + pipelineStore.put(pipeline.getId(), pipeline); + pipelineStateMap.addPipeline(pipeline); + nodeManager.addPipeline(pipeline); + LOG.info("Created pipeline {}.", pipeline); + } + + @Override + public void addContainerToPipeline( + PipelineID pipelineId, ContainerID containerID) + throws IOException { + pipelineStateMap.addContainerToPipeline(pipelineId, containerID); + } + + @Override + public Pipeline getPipeline(PipelineID pipelineID) + throws PipelineNotFoundException { + return pipelineStateMap.getPipeline(pipelineID); + } + + @Override + public List getPipelines() { + return pipelineStateMap.getPipelines(); + } + + @Override + public List getPipelines(HddsProtos.ReplicationType type) { + return pipelineStateMap.getPipelines(type); + } + + @Override + public List getPipelines( + HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor) { + return pipelineStateMap.getPipelines(type, factor); + } + + @Override + public List getPipelines( + HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, + Pipeline.PipelineState state) { + return pipelineStateMap.getPipelines(type, factor, state); + } + + @Override + public List getPipelines( + HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, + Pipeline.PipelineState state, Collection excludeDns, + Collection excludePipelines) { + return pipelineStateMap + .getPipelines(type, factor, state, excludeDns, excludePipelines); + } + + @Override + public List getPipelines(HddsProtos.ReplicationType type, + Pipeline.PipelineState... states) { + return pipelineStateMap.getPipelines(type, states); + } + + @Override + public NavigableSet getContainers(PipelineID pipelineID) + throws IOException { + return pipelineStateMap.getContainers(pipelineID); + } + + @Override + public int getNumberOfContainers(PipelineID pipelineID) throws IOException { + return pipelineStateMap.getNumberOfContainers(pipelineID); + } + + @Override + public void removePipeline(HddsProtos.PipelineID pipelineIDProto) + throws IOException { + PipelineID pipelineID = PipelineID.getFromProtobuf(pipelineIDProto); + pipelineStore.delete(pipelineID); + Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID); + nodeManager.removePipeline(pipeline); + LOG.info("Pipeline {} removed.", pipeline); + return; + } + + + @Override + public void removeContainerFromPipeline( + PipelineID pipelineID, ContainerID containerID) throws IOException { + pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID); + } + + @Override + public void updatePipelineState( + HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState) + throws IOException { + pipelineStateMap.updatePipelineState( + PipelineID.getFromProtobuf(pipelineIDProto), + Pipeline.PipelineState.fromProtobuf(newState)); + } + + @Override + public void close() throws Exception { + pipelineStore.close(); + } + + public static Builder newBuilder() { + return new Builder(); + } + + /** + * Builder for PipelineStateManager. + */ + public static class Builder { + private Table pipelineStore; + private NodeManager nodeManager; + private SCMRatisServer scmRatisServer; + + public Builder setRatisServer(final SCMRatisServer ratisServer) { + scmRatisServer = ratisServer; + return this; + } + + public Builder setNodeManager(final NodeManager scmNodeManager) { + nodeManager = scmNodeManager; + return this; + } + + public Builder setPipelineStore( + final Table pipelineTable) { + this.pipelineStore = pipelineTable; + return this; + } + + public PipelineStateManagerV2 build() throws IOException { + Preconditions.checkNotNull(pipelineStore); + + final PipelineStateManagerV2 pipelineStateManager = + new PipelineStateManagerV2Impl(pipelineStore, nodeManager); + + final SCMHAInvocationHandler invocationHandler = + new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.PIPELINE, + pipelineStateManager, scmRatisServer); + + return (PipelineStateManagerV2) Proxy.newProxyInstance( + SCMHAInvocationHandler.class.getClassLoader(), + new Class[]{PipelineStateManagerV2.class}, invocationHandler); + } + } +} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index 5c9b202ff62d..edc40af10469 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -117,4 +117,23 @@ static List checkPipelineContainSameDatanodes( p.sameDatanodes(pipeline))) .collect(Collectors.toList()); } + + /** + * Return the list of pipelines who share the same set of datanodes + * with the input pipeline. + * + * @param stateManager PipelineStateManager + * @param pipeline input pipeline + * @return list of matched pipeline + */ + static List checkPipelineContainSameDatanodes( + PipelineStateManagerV2 stateManager, Pipeline pipeline) { + return stateManager.getPipelines( + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE) + .stream().filter(p -> !p.getId().equals(pipeline.getId()) && + (p.getPipelineState() != Pipeline.PipelineState.CLOSED && + p.sameDatanodes(pipeline))) + .collect(Collectors.toList()); + } } From 988b23ad30396e6f6233765248bb19938bc23c68 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Wed, 3 Jun 2020 11:34:21 +0800 Subject: [PATCH 08/51] HDDS-3693 Switch to new StateManager interface. (#1007) Co-authored-by: Li Cheng --- .../hdds/scm/pipeline/PipelineFactory.java | 2 +- .../scm/pipeline/PipelineManagerV2Impl.java | 22 +++-- .../scm/pipeline/PipelinePlacementPolicy.java | 5 +- .../hdds/scm/pipeline/PipelineProvider.java | 6 +- .../scm/pipeline/PipelineStateManager.java | 80 +++++++++++++---- .../pipeline/PipelineStateManagerV2Impl.java | 52 +++++++++-- .../scm/pipeline/RatisPipelineProvider.java | 4 +- .../hdds/scm/pipeline/RatisPipelineUtils.java | 2 +- .../hdds/scm/pipeline/SCMPipelineManager.java | 4 +- .../scm/pipeline/SimplePipelineProvider.java | 2 +- ...eStateManagerV2.java => StateManager.java} | 23 ++++- .../pipeline/MockRatisPipelineProvider.java | 20 +++-- .../scm/pipeline/TestPipelieManagerImpl.java | 87 +++++++++++++++++++ .../TestPipelineDatanodesIntersection.java | 2 +- .../pipeline/TestPipelineStateManager.java | 2 +- .../ozone/recon/scm/ReconPipelineManager.java | 3 +- 16 files changed, 262 insertions(+), 54 deletions(-) rename hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/{PipelineStateManagerV2.java => StateManager.java} (83%) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index e1cf382d1a2e..bdd5053a6b07 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -39,7 +39,7 @@ public class PipelineFactory { private Map providers; - PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager, + PipelineFactory(NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { providers = new HashMap<>(); providers.put(ReplicationType.STAND_ALONE, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index a6a3249fc7dc..f4510004fae7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -58,13 +58,13 @@ * All the write operations for pipelines must come via PipelineManager. * It synchronises all write and read operations via a ReadWriteLock. */ -public class PipelineManagerV2Impl implements PipelineManager { +public final class PipelineManagerV2Impl implements PipelineManager { private static final Logger LOG = LoggerFactory.getLogger(SCMPipelineManager.class); private final ReadWriteLock lock; private PipelineFactory pipelineFactory; - private PipelineStateManagerV2 stateManager; + private StateManager stateManager; private Scheduler scheduler; private BackgroundPipelineCreator backgroundPipelineCreator; private final ConfigurationSource conf; @@ -77,9 +77,9 @@ public class PipelineManagerV2Impl implements PipelineManager { // to prevent pipelines being created until sufficient nodes have registered. private final AtomicBoolean pipelineCreationAllowed; - public PipelineManagerV2Impl(ConfigurationSource conf, + private PipelineManagerV2Impl(ConfigurationSource conf, NodeManager nodeManager, - PipelineStateManagerV2 pipelineStateManager, + StateManager pipelineStateManager, PipelineFactory pipelineFactory) { this.lock = new ReentrantReadWriteLock(); this.pipelineFactory = pipelineFactory; @@ -100,17 +100,20 @@ public PipelineManagerV2Impl(ConfigurationSource conf, this.pipelineCreationAllowed = new AtomicBoolean(!this.isInSafeMode.get()); } - public static PipelineManager newPipelineManager( + public static PipelineManagerV2Impl newPipelineManager( ConfigurationSource conf, SCMHAManager scmhaManager, NodeManager nodeManager, Table pipelineStore, - PipelineFactory pipelineFactory) throws IOException { + EventPublisher eventPublisher) throws IOException { // Create PipelineStateManager - PipelineStateManagerV2 stateManager = PipelineStateManagerV2Impl + StateManager stateManager = PipelineStateManagerV2Impl .newBuilder().setPipelineStore(pipelineStore) .setRatisServer(scmhaManager.getRatisServer()) .setNodeManager(nodeManager) .build(); + // Create PipelineFactory + PipelineFactory pipelineFactory = new PipelineFactory( + nodeManager, stateManager, conf, eventPublisher); // Create PipelineManager PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf, nodeManager, stateManager, pipelineFactory); @@ -572,6 +575,11 @@ public boolean isPipelineCreationAllowed() { return pipelineCreationAllowed.get(); } + @VisibleForTesting + public void allowPipelineCreation() { + this.pipelineCreationAllowed.set(true); + } + private void setBackgroundPipelineCreator( BackgroundPipelineCreator backgroundPipelineCreator) { this.backgroundPipelineCreator = backgroundPipelineCreator; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java index 524b5ec8b216..65732716c769 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java @@ -51,7 +51,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { static final Logger LOG = LoggerFactory.getLogger(PipelinePlacementPolicy.class); private final NodeManager nodeManager; - private final PipelineStateManager stateManager; + private final StateManager stateManager; private final ConfigurationSource conf; private final int heavyNodeCriteria; private static final int REQUIRED_RACKS = 2; @@ -65,7 +65,8 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy { * @param conf Configuration */ public PipelinePlacementPolicy(final NodeManager nodeManager, - final PipelineStateManager stateManager, final ConfigurationSource conf) { + final StateManager stateManager, + final ConfigurationSource conf) { super(nodeManager, conf); this.nodeManager = nodeManager; this.conf = conf; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java index 533f77e0e8eb..576d415f8ac3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineProvider.java @@ -37,10 +37,10 @@ public abstract class PipelineProvider { private final NodeManager nodeManager; - private final PipelineStateManager stateManager; + private final StateManager stateManager; public PipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager) { + StateManager stateManager) { this.nodeManager = nodeManager; this.stateManager = stateManager; } @@ -54,7 +54,7 @@ public NodeManager getNodeManager() { return nodeManager; } - public PipelineStateManager getPipelineStateManager() { + public StateManager getPipelineStateManager() { return stateManager; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index bb56a0380b1b..de6f18677b2c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -38,7 +39,7 @@ * state. All the read and write operations in PipelineStateMap are protected * by a read write lock. */ -public class PipelineStateManager { +public class PipelineStateManager implements StateManager { private static final Logger LOG = LoggerFactory.getLogger(PipelineStateManager.class); @@ -48,71 +49,90 @@ public class PipelineStateManager { public PipelineStateManager() { this.pipelineStateMap = new PipelineStateMap(); } - + @Override public void addPipeline(Pipeline pipeline) throws IOException { pipelineStateMap.addPipeline(pipeline); LOG.info("Created pipeline {}", pipeline); } - void addContainerToPipeline(PipelineID pipelineId, ContainerID containerID) + @Override + public void addContainerToPipeline(PipelineID pipelineId, + ContainerID containerID) throws IOException { pipelineStateMap.addContainerToPipeline(pipelineId, containerID); } - Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException { + @Override + public Pipeline getPipeline(PipelineID pipelineID) + throws PipelineNotFoundException { return pipelineStateMap.getPipeline(pipelineID); } + @Override public List getPipelines() { return pipelineStateMap.getPipelines(); } - List getPipelines(ReplicationType type) { + @Override + public List getPipelines(ReplicationType type) { return pipelineStateMap.getPipelines(type); } - List getPipelines(ReplicationType type, ReplicationFactor factor) { + @Override + public List getPipelines(ReplicationType type, + ReplicationFactor factor) { return pipelineStateMap.getPipelines(type, factor); } - List getPipelines(ReplicationType type, ReplicationFactor factor, + @Override + public List getPipelines(ReplicationType type, + ReplicationFactor factor, PipelineState state) { return pipelineStateMap.getPipelines(type, factor, state); } - List getPipelines(ReplicationType type, ReplicationFactor factor, + @Override + public List getPipelines( + ReplicationType type, ReplicationFactor factor, PipelineState state, Collection excludeDns, Collection excludePipelines) { return pipelineStateMap .getPipelines(type, factor, state, excludeDns, excludePipelines); } - List getPipelines(ReplicationType type, PipelineState... states) { + @Override + public List getPipelines(ReplicationType type, + PipelineState... states) { return pipelineStateMap.getPipelines(type, states); } - NavigableSet getContainers(PipelineID pipelineID) + @Override + public NavigableSet getContainers(PipelineID pipelineID) throws IOException { return pipelineStateMap.getContainers(pipelineID); } - int getNumberOfContainers(PipelineID pipelineID) throws IOException { + @Override + public int getNumberOfContainers(PipelineID pipelineID) throws IOException { return pipelineStateMap.getNumberOfContainers(pipelineID); } - Pipeline removePipeline(PipelineID pipelineID) throws IOException { + @Override + public Pipeline removePipeline(PipelineID pipelineID) throws IOException { Pipeline pipeline = pipelineStateMap.removePipeline(pipelineID); LOG.info("Pipeline {} removed from db", pipeline); return pipeline; } - void removeContainerFromPipeline(PipelineID pipelineID, + @Override + public void removeContainerFromPipeline(PipelineID pipelineID, ContainerID containerID) throws IOException { pipelineStateMap.removeContainerFromPipeline(pipelineID, containerID); } - Pipeline finalizePipeline(PipelineID pipelineId) - throws PipelineNotFoundException { + @Override + public Pipeline finalizePipeline(PipelineID pipelineId) + throws IOException { Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId); if (!pipeline.isClosed()) { pipeline = pipelineStateMap @@ -122,7 +142,8 @@ Pipeline finalizePipeline(PipelineID pipelineId) return pipeline; } - Pipeline openPipeline(PipelineID pipelineId) throws IOException { + @Override + public Pipeline openPipeline(PipelineID pipelineId) throws IOException { Pipeline pipeline = pipelineStateMap.getPipeline(pipelineId); if (pipeline.isClosed()) { throw new IOException("Closed pipeline can not be opened"); @@ -141,6 +162,7 @@ Pipeline openPipeline(PipelineID pipelineId) throws IOException { * @param pipelineID ID of the pipeline to activate. * @throws IOException in case of any Exception */ + @Override public void activatePipeline(PipelineID pipelineID) throws IOException { pipelineStateMap @@ -153,14 +175,40 @@ public void activatePipeline(PipelineID pipelineID) * @param pipelineID ID of the pipeline to deactivate. * @throws IOException in case of any Exception */ + @Override public void deactivatePipeline(PipelineID pipelineID) throws IOException { pipelineStateMap .updatePipelineState(pipelineID, PipelineState.DORMANT); } + @Override public void updatePipelineState(PipelineID id, PipelineState newState) throws PipelineNotFoundException { pipelineStateMap.updatePipelineState(id, newState); } + + @Override + public void addPipeline(HddsProtos.Pipeline pipelineProto) + throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void removePipeline(HddsProtos.PipelineID pipelineIDProto) + throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void updatePipelineState(HddsProtos.PipelineID pipelineIDProto, + HddsProtos.PipelineState newState) + throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void close() { + // Do nothing + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java index c74dc8678f88..703cdec0c0cb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2Impl.java @@ -42,7 +42,7 @@ * state. All the read and write operations in PipelineStateMap are protected * by a read write lock. */ -public class PipelineStateManagerV2Impl implements PipelineStateManagerV2 { +public class PipelineStateManagerV2Impl implements StateManager { private static final Logger LOG = LoggerFactory.getLogger(PipelineStateManager.class); @@ -180,6 +180,48 @@ public void close() throws Exception { pipelineStore.close(); } + // TODO Remove legacy + @Override + public void addPipeline(Pipeline pipeline) throws IOException { + throw new IOException("Not supported."); + } + + @Override + public Pipeline removePipeline(PipelineID pipelineID) throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void updatePipelineState(PipelineID id, + Pipeline.PipelineState newState) + throws IOException { + throw new IOException("Not supported."); + } + + @Override + public Pipeline finalizePipeline(PipelineID pipelineId) + throws IOException { + throw new IOException("Not supported."); + } + + + @Override + public Pipeline openPipeline(PipelineID pipelineId) throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void activatePipeline(PipelineID pipelineID) throws IOException { + throw new IOException("Not supported."); + } + + @Override + public void deactivatePipeline(PipelineID pipelineID) throws IOException { + throw new IOException("Not supported."); + } + + // legacy interfaces end + public static Builder newBuilder() { return new Builder(); } @@ -208,19 +250,19 @@ public Builder setPipelineStore( return this; } - public PipelineStateManagerV2 build() throws IOException { + public StateManager build() throws IOException { Preconditions.checkNotNull(pipelineStore); - final PipelineStateManagerV2 pipelineStateManager = + final StateManager pipelineStateManager = new PipelineStateManagerV2Impl(pipelineStore, nodeManager); final SCMHAInvocationHandler invocationHandler = new SCMHAInvocationHandler(SCMRatisProtocol.RequestType.PIPELINE, pipelineStateManager, scmRatisServer); - return (PipelineStateManagerV2) Proxy.newProxyInstance( + return (StateManager) Proxy.newProxyInstance( SCMHAInvocationHandler.class.getClassLoader(), - new Class[]{PipelineStateManagerV2.class}, invocationHandler); + new Class[]{StateManager.class}, invocationHandler); } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 4d9154117081..821ed302fb3f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -54,8 +54,8 @@ public class RatisPipelineProvider extends PipelineProvider { private int maxPipelinePerDatanode; RatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, ConfigurationSource conf, - EventPublisher eventPublisher) { + StateManager stateManager, ConfigurationSource conf, + EventPublisher eventPublisher) { super(nodeManager, stateManager); this.conf = conf; this.eventPublisher = eventPublisher; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index edc40af10469..19a8fc5207c5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -127,7 +127,7 @@ static List checkPipelineContainSameDatanodes( * @return list of matched pipeline */ static List checkPipelineContainSameDatanodes( - PipelineStateManagerV2 stateManager, Pipeline pipeline) { + StateManager stateManager, Pipeline pipeline) { return stateManager.getPipelines( HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index 941ce19f61cb..71c31907e833 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -68,7 +68,7 @@ public class SCMPipelineManager implements PipelineManager { private final ReadWriteLock lock; private PipelineFactory pipelineFactory; - private PipelineStateManager stateManager; + private StateManager stateManager; private final BackgroundPipelineCreator backgroundPipelineCreator; private Scheduler scheduler; @@ -133,7 +133,7 @@ protected SCMPipelineManager(ConfigurationSource conf, this.pipelineCreationAllowed = new AtomicBoolean(!this.isInSafeMode.get()); } - public PipelineStateManager getStateManager() { + public StateManager getStateManager() { return stateManager; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java index c7b63055b89e..69711bba0f3e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SimplePipelineProvider.java @@ -34,7 +34,7 @@ public class SimplePipelineProvider extends PipelineProvider { public SimplePipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager) { + StateManager stateManager) { super(nodeManager, stateManager); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java similarity index 83% rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java index 402157566c5e..3a772e56d88a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/StateManager.java @@ -29,8 +29,9 @@ /** * Manages the state of pipelines in SCM. + * TODO Rename to PipelineStateManager once the old state manager is removed. */ -public interface PipelineStateManagerV2 { +public interface StateManager { /** * Adding pipeline would be replicated to Ratis. @@ -96,4 +97,24 @@ void removeContainerFromPipeline(PipelineID pipelineID, ContainerID containerID) throws IOException; void close() throws Exception; + + // TODO remove legacy interfaces once we switch to Ratis based. + + void addPipeline(Pipeline pipeline) throws IOException; + + Pipeline removePipeline(PipelineID pipelineID) throws IOException; + + void updatePipelineState(PipelineID id, Pipeline.PipelineState newState) + throws IOException; + + Pipeline finalizePipeline(PipelineID pipelineId) + throws IOException; + + Pipeline openPipeline(PipelineID pipelineId) throws IOException; + + void activatePipeline(PipelineID pipelineID) + throws IOException; + + void deactivatePipeline(PipelineID pipelineID) + throws IOException; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index f9fb15053045..e355877e269c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -36,29 +36,31 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider { private boolean autoOpenPipeline; private boolean isHealthy; - public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, ConfigurationSource conf, - EventPublisher eventPublisher, boolean autoOpen) { + public MockRatisPipelineProvider( + NodeManager nodeManager, StateManager stateManager, + ConfigurationSource conf, EventPublisher eventPublisher, + boolean autoOpen) { super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = autoOpen; } public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, + StateManager stateManager, ConfigurationSource conf) { super(nodeManager, stateManager, conf, new EventQueue()); } public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, - ConfigurationSource conf, boolean isHealthy) { + StateManager stateManager, + ConfigurationSource conf, + boolean isHealthy) { super(nodeManager, stateManager, conf, new EventQueue()); this.isHealthy = isHealthy; } - public MockRatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, ConfigurationSource conf, - EventPublisher eventPublisher) { + public MockRatisPipelineProvider( + NodeManager nodeManager, StateManager stateManager, + ConfigurationSource conf, EventPublisher eventPublisher) { super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = true; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java new file mode 100644 index 000000000000..a2a8e25d0afa --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.pipeline; + +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.TestContainerManagerImpl; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.util.UUID; + +/** + * Tests for PipelineManagerImpl. + */ +public class TestPipelieManagerImpl { + private PipelineManagerV2Impl pipelineManager; + private File testDir; + private DBStore dbStore; + + @Before + public void init() throws Exception { + final OzoneConfiguration conf = SCMTestUtils.getConf(); + testDir = GenericTestUtils.getTestDir( + TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + pipelineManager = PipelineManagerV2Impl.newPipelineManager( + conf, MockSCMHAManager.getInstance(), + new MockNodeManager(true, 20), + SCMDBDefinition.PIPELINES.getTable(dbStore), new EventQueue()); + } + + @After + public void cleanup() throws Exception { + if (pipelineManager != null) { + pipelineManager.close(); + } + if (dbStore != null) { + dbStore.close(); + } + FileUtil.fullyDelete(testDir); + } + + @Test + public void testCreatePipeline() throws Exception { + Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline1 = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline1.getId())); + + Pipeline pipeline2 = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); + Assert.assertEquals(2, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId())); + } +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java index 41eea3d9dc67..3320081a9f2b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java @@ -80,7 +80,7 @@ public void testPipelineDatanodesIntersection() { NodeManager nodeManager= new MockNodeManager(true, nodeCount); conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness); conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false); - PipelineStateManager stateManager = new PipelineStateManager(); + StateManager stateManager = new PipelineStateManager(); PipelineProvider provider = new MockRatisPipelineProvider(nodeManager, stateManager, conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java index 6bff5813d24b..8252e2c9df25 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java @@ -37,7 +37,7 @@ */ public class TestPipelineStateManager { - private PipelineStateManager stateManager; + private StateManager stateManager; @Before public void init() throws Exception { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java index a8dd3c990ca3..beed59195c90 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; -import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventPublisher; @@ -118,7 +117,7 @@ public void removeInvalidPipelines(List pipelinesFromScm) { if (!p.getPipelineState().equals(CLOSED)) { try { getStateManager().updatePipelineState(pipelineID, CLOSED); - } catch (PipelineNotFoundException e) { + } catch (IOException e) { LOG.warn("Pipeline {} not found while updating state. ", p.getId(), e); } From 53559397fd5f70112b599df2e330e869a1a934dd Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Thu, 4 Jun 2020 11:49:22 +0530 Subject: [PATCH 09/51] HDDS-3711. Handle inner classes in SCMRatisRequest and SCMRatisResponse. (#1016) Merge in. Thanks @nandakumar131 for the contribution --- .../java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java | 2 +- .../java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java index d65c23502b58..3e516537a695 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java @@ -93,7 +93,7 @@ public Message encode() throws InvalidProtocolBufferException { final List args = new ArrayList<>(); for (Object argument : arguments) { final MethodArgument.Builder argBuilder = MethodArgument.newBuilder(); - argBuilder.setType(argument.getClass().getCanonicalName()); + argBuilder.setType(argument.getClass().getName()); if (argument instanceof GeneratedMessage) { argBuilder.setValue(((GeneratedMessage) argument).toByteString()); } else if (argument instanceof ProtocolMessageEnum) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java index 21ca4be50841..b3ec5436bcd3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisResponse.java @@ -84,7 +84,7 @@ public static Message encode(final Object result) final SCMRatisResponseProto response = SCMRatisResponseProto.newBuilder() - .setType(result.getClass().getCanonicalName()) + .setType(result.getClass().getName()) .setValue(value) .build(); return Message.valueOf( From 8e86480896c86f6e05c8339718611c4b264dcd8e Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Mon, 15 Jun 2020 21:12:52 +0800 Subject: [PATCH 10/51] HDDS-3679 Add tests for PipelineManager V2. (#1019) --- .../scm/pipeline/PipelineManagerV2Impl.java | 8 +- .../scm/pipeline/PipelineStateManager.java | 4 +- .../scm/pipeline/TestPipelieManagerImpl.java | 87 ---- .../scm/pipeline/TestPipelineManagerImpl.java | 462 ++++++++++++++++++ 4 files changed, 468 insertions(+), 93 deletions(-) delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index f4510004fae7..3732add07927 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -299,8 +299,8 @@ public void openPipeline(PipelineID pipelineId) throws IOException { } if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) { LOG.info("Pipeline {} moved to OPEN state", pipeline); - stateManager.updatePipelineState(pipelineId.getProtobuf(), - HddsProtos.PipelineState.PIPELINE_OPEN); + stateManager.updatePipelineState( + pipelineId.getProtobuf(), HddsProtos.PipelineState.PIPELINE_OPEN); } metrics.incNumPipelineCreated(); metrics.createPerPipelineMetrics(pipeline); @@ -349,8 +349,8 @@ private void finalizePipeline(PipelineID pipelineId) throws IOException { try { Pipeline pipeline = stateManager.getPipeline(pipelineId); if (!pipeline.isClosed()) { - stateManager.updatePipelineState(pipelineId.getProtobuf(), - HddsProtos.PipelineState.PIPELINE_CLOSED); + stateManager.updatePipelineState( + pipelineId.getProtobuf(), HddsProtos.PipelineState.PIPELINE_CLOSED); LOG.info("Pipeline {} moved to CLOSED state", pipeline); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java index de6f18677b2c..899d8774b802 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java @@ -201,8 +201,8 @@ public void removePipeline(HddsProtos.PipelineID pipelineIDProto) } @Override - public void updatePipelineState(HddsProtos.PipelineID pipelineIDProto, - HddsProtos.PipelineState newState) + public void updatePipelineState( + HddsProtos.PipelineID pipelineIDProto, HddsProtos.PipelineState newState) throws IOException { throw new IOException("Not supported."); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java deleted file mode 100644 index a2a8e25d0afa..000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelieManagerImpl.java +++ /dev/null @@ -1,87 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.scm.pipeline; - -import org.apache.hadoop.fs.FileUtil; -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.TestContainerManagerImpl; -import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; -import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; -import org.apache.hadoop.hdds.server.events.EventQueue; -import org.apache.hadoop.hdds.utils.db.DBStore; -import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.test.GenericTestUtils; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; - -import java.io.File; -import java.util.UUID; - -/** - * Tests for PipelineManagerImpl. - */ -public class TestPipelieManagerImpl { - private PipelineManagerV2Impl pipelineManager; - private File testDir; - private DBStore dbStore; - - @Before - public void init() throws Exception { - final OzoneConfiguration conf = SCMTestUtils.getConf(); - testDir = GenericTestUtils.getTestDir( - TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); - dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); - pipelineManager = PipelineManagerV2Impl.newPipelineManager( - conf, MockSCMHAManager.getInstance(), - new MockNodeManager(true, 20), - SCMDBDefinition.PIPELINES.getTable(dbStore), new EventQueue()); - } - - @After - public void cleanup() throws Exception { - if (pipelineManager != null) { - pipelineManager.close(); - } - if (dbStore != null) { - dbStore.close(); - } - FileUtil.fullyDelete(testDir); - } - - @Test - public void testCreatePipeline() throws Exception { - Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); - pipelineManager.allowPipelineCreation(); - Pipeline pipeline1 = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); - Assert.assertEquals(1, pipelineManager.getPipelines().size()); - Assert.assertTrue(pipelineManager.containsPipeline(pipeline1.getId())); - - Pipeline pipeline2 = pipelineManager.createPipeline( - HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); - Assert.assertEquals(2, pipelineManager.getPipelines().size()); - Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId())); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java new file mode 100644 index 000000000000..f8eeb6e48f8d --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -0,0 +1,462 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.pipeline; + +import com.google.common.base.Supplier; +import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.container.MockNodeManager; +import org.apache.hadoop.hdds.scm.container.TestContainerManagerImpl; +import org.apache.hadoop.hdds.scm.exceptions.SCMException; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; +import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; +import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; +import org.apache.hadoop.hdds.server.events.EventQueue; +import org.apache.hadoop.hdds.utils.db.DBStore; +import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.io.File; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT; +import static org.apache.hadoop.hdds.scm.pipeline.Pipeline.PipelineState.ALLOCATED; +import static org.apache.hadoop.test.MetricsAsserts.getLongCounter; +import static org.apache.hadoop.test.MetricsAsserts.getMetrics; +import static org.junit.Assert.fail; + +/** + * Tests for PipelineManagerImpl. + */ +public class TestPipelineManagerImpl { + private static OzoneConfiguration conf; + private static File testDir; + private DBStore dbStore; + private static MockNodeManager nodeManager; + private static int maxPipelineCount; + private static EventQueue eventQueue; + + @Before + public void init() throws Exception { + conf = SCMTestUtils.getConf(); + testDir = GenericTestUtils.getTestDir( + TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); + dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); + nodeManager = new MockNodeManager(true, 20); + eventQueue = new EventQueue(); + maxPipelineCount = nodeManager.getNodeCount(HddsProtos.NodeState.HEALTHY) * + conf.getInt(OZONE_DATANODE_PIPELINE_LIMIT, + OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) / + HddsProtos.ReplicationFactor.THREE.getNumber(); + } + + @After + public void cleanup() throws Exception { + if (dbStore != null) { + dbStore.close(); + } + FileUtil.fullyDelete(testDir); + } + + private PipelineManagerV2Impl createPipelineManager() + throws IOException { + return PipelineManagerV2Impl.newPipelineManager( + conf, MockSCMHAManager.getInstance(), + nodeManager, + SCMDBDefinition.PIPELINES.getTable(dbStore), eventQueue); + } + + @Test + public void testCreatePipeline() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline1 = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline1.getId())); + + Pipeline pipeline2 = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.ONE); + Assert.assertEquals(2, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId())); + pipelineManager.close(); + + PipelineManagerV2Impl pipelineManager2 = createPipelineManager(); + // Should be able to load previous pipelines. + Assert.assertFalse(pipelineManager.getPipelines().isEmpty()); + Assert.assertEquals(2, pipelineManager.getPipelines().size()); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline3 = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(3, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline3.getId())); + + pipelineManager2.close(); + } + + @Test + public void testUpdatePipelineStates() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + PipelineID pipelineID = pipeline.getId(); + + pipelineManager.openPipeline(pipelineID); + pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueof(1)); + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.OPEN).contains(pipeline)); + + pipelineManager.deactivatePipeline(pipeline.getId()); + Assert.assertEquals(Pipeline.PipelineState.DORMANT, + pipelineManager.getPipeline(pipelineID).getPipelineState()); + Assert.assertFalse(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.OPEN).contains(pipeline)); + + pipelineManager.activatePipeline(pipeline.getId()); + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.OPEN).contains(pipeline)); + + pipelineManager.close(); + } + + @Test + public void testRemovePipeline() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + // Create a pipeline + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + + // Open the pipeline + pipelineManager.openPipeline(pipeline.getId()); + pipelineManager + .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.OPEN).contains(pipeline)); + + try { + pipelineManager.removePipeline(pipeline.getId()); + fail(); + } catch (IOException ioe) { + // Should not be able to remove the OPEN pipeline. + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + } catch (Exception e) { + Assert.fail("Should not reach here."); + } + + // Destroy pipeline + pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + try { + pipelineManager.getPipeline(pipeline.getId()); + fail("Pipeline should not have been retrieved"); + } catch (PipelineNotFoundException e) { + // There should be no pipeline in pipelineManager. + Assert.assertEquals(0, pipelineManager.getPipelines().size()); + } + + pipelineManager.close(); + } + + @Test + public void testPipelineReport() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + SCMSafeModeManager scmSafeModeManager = + new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager, + eventQueue); + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + + // pipeline is not healthy until all dns report + List nodes = pipeline.getNodes(); + Assert.assertFalse( + pipelineManager.getPipeline(pipeline.getId()).isHealthy()); + // get pipeline report from each dn in the pipeline + PipelineReportHandler pipelineReportHandler = + new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf); + nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline, + pipelineReportHandler, false)); + sendPipelineReport(nodes.get(nodes.size() - 1), pipeline, + pipelineReportHandler, true); + + // pipeline is healthy when all dns report + Assert + .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isHealthy()); + // pipeline should now move to open state + Assert + .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen()); + + // close the pipeline + pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + + // pipeline report for destroyed pipeline should be ignored + nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline, + pipelineReportHandler, false)); + sendPipelineReport(nodes.get(nodes.size() - 1), pipeline, + pipelineReportHandler, true); + + try { + pipelineManager.getPipeline(pipeline.getId()); + fail("Pipeline should not have been retrieved"); + } catch (PipelineNotFoundException e) { + // should reach here + } + + // clean up + pipelineManager.close(); + } + + @Test + public void testPipelineCreationFailedMetric() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + + // No pipeline at start + MetricsRecordBuilder metrics = getMetrics( + SCMPipelineMetrics.class.getSimpleName()); + long numPipelineAllocated = getLongCounter("NumPipelineAllocated", + metrics); + Assert.assertEquals(0, numPipelineAllocated); + + // 3 DNs are unhealthy. + // Create 5 pipelines (Use up 15 Datanodes) + + for (int i = 0; i < maxPipelineCount; i++) { + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + Assert.assertNotNull(pipeline); + } + + metrics = getMetrics( + SCMPipelineMetrics.class.getSimpleName()); + numPipelineAllocated = getLongCounter("NumPipelineAllocated", metrics); + Assert.assertEquals(maxPipelineCount, numPipelineAllocated); + + long numPipelineCreateFailed = getLongCounter( + "NumPipelineCreationFailed", metrics); + Assert.assertEquals(0, numPipelineCreateFailed); + + //This should fail... + try { + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + fail(); + } catch (SCMException ioe) { + // pipeline creation failed this time. + Assert.assertEquals(SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE, + ioe.getResult()); + } + + metrics = getMetrics( + SCMPipelineMetrics.class.getSimpleName()); + numPipelineAllocated = getLongCounter("NumPipelineAllocated", metrics); + Assert.assertEquals(maxPipelineCount, numPipelineAllocated); + + numPipelineCreateFailed = getLongCounter( + "NumPipelineCreationFailed", metrics); + Assert.assertEquals(1, numPipelineCreateFailed); + + // clean up + pipelineManager.close(); + } + + @Test + public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + + pipelineManager.onMessage( + new SCMSafeModeManager.SafeModeStatus(true, true), null); + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + // close manager + pipelineManager.close(); + // new pipeline manager loads the pipelines from the db in ALLOCATED state + pipelineManager = createPipelineManager(); + Assert.assertEquals(Pipeline.PipelineState.ALLOCATED, + pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); + + SCMSafeModeManager scmSafeModeManager = + new SCMSafeModeManager(new OzoneConfiguration(), + new ArrayList<>(), pipelineManager, eventQueue); + PipelineReportHandler pipelineReportHandler = + new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf); + + // Report pipelines with leaders + List nodes = pipeline.getNodes(); + Assert.assertEquals(3, nodes.size()); + // Send report for all but no leader + nodes.forEach(dn -> sendPipelineReport(dn, pipeline, pipelineReportHandler, + false)); + + Assert.assertEquals(Pipeline.PipelineState.ALLOCATED, + pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); + + nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline, + pipelineReportHandler, false)); + sendPipelineReport(nodes.get(nodes.size() - 1), pipeline, + pipelineReportHandler, true); + + Assert.assertEquals(Pipeline.PipelineState.OPEN, + pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); + + pipelineManager.close(); + } + + @Test + public void testScrubPipeline() throws Exception { + // No timeout for pipeline scrubber. + conf.setTimeDuration( + OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, + TimeUnit.MILLISECONDS); + + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + // At this point, pipeline is not at OPEN stage. + Assert.assertEquals(Pipeline.PipelineState.ALLOCATED, + pipeline.getPipelineState()); + + // pipeline should be seen in pipelineManager as ALLOCATED. + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.ALLOCATED).contains(pipeline)); + pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + + // pipeline should be scrubbed. + Assert.assertFalse(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.ALLOCATED).contains(pipeline)); + + pipelineManager.close(); + } + + @Test + public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception { + // No timeout for pipeline scrubber. + conf.setTimeDuration( + OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, + TimeUnit.MILLISECONDS); + + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + try { + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + fail("Pipelines should not have been created"); + } catch (IOException e) { + // No pipeline is created. + Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); + } + + // Ensure a pipeline of factor ONE can be created - no exceptions should be + // raised. + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE); + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.ONE).contains(pipeline)); + + // Simulate safemode check exiting. + pipelineManager.onMessage( + new SCMSafeModeManager.SafeModeStatus(true, true), null); + GenericTestUtils.waitFor(new Supplier() { + @Override + public Boolean get() { + return pipelineManager.getPipelines().size() != 0; + } + }, 100, 10000); + pipelineManager.close(); + } + + @Test + public void testSafeModeUpdatedOnSafemodeExit() throws Exception { + // No timeout for pipeline scrubber. + conf.setTimeDuration( + OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, + TimeUnit.MILLISECONDS); + + PipelineManagerV2Impl pipelineManager = createPipelineManager(); + Assert.assertTrue(pipelineManager.getSafeModeStatus()); + Assert.assertFalse(pipelineManager.isPipelineCreationAllowed()); + // First pass pre-check as true, but safemode still on + pipelineManager.onMessage( + new SCMSafeModeManager.SafeModeStatus(true, true), null); + Assert.assertTrue(pipelineManager.getSafeModeStatus()); + Assert.assertTrue(pipelineManager.isPipelineCreationAllowed()); + + // Then also turn safemode off + pipelineManager.onMessage( + new SCMSafeModeManager.SafeModeStatus(false, true), null); + Assert.assertFalse(pipelineManager.getSafeModeStatus()); + Assert.assertTrue(pipelineManager.isPipelineCreationAllowed()); + pipelineManager.close(); + } + + private void sendPipelineReport( + DatanodeDetails dn, Pipeline pipeline, + PipelineReportHandler pipelineReportHandler, + boolean isLeader) { + SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode report = + TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), isLeader); + pipelineReportHandler.onMessage(report, eventQueue); + } +} From 8d74c0ce586d18a1d5cd2b239dbb886b549df296 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Fri, 26 Jun 2020 07:52:35 +0800 Subject: [PATCH 11/51] HDDS-3652 Add test for SCMRatisResponse. (#1113) --- .../hdds/scm/ha/TestSCMRatisResponse.java | 73 +++++++++++++++++++ 1 file changed, 73 insertions(+) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java new file mode 100644 index 000000000000..daf08565888c --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.LeaderNotReadyException; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientReply; +import org.apache.ratis.protocol.RaftException; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeerId; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +/** + * Test for SCMRatisResponse. + */ +public class TestSCMRatisResponse { + private RaftGroupMemberId raftId; + + @Before + public void init() { + raftId = RaftGroupMemberId.valueOf( + RaftPeerId.valueOf("peer"), RaftGroupId.randomId()); + } + + @Test + public void testEncodeAndDecodeSuccess() throws Exception { + SCMRatisResponse response = SCMRatisResponse.decode(new RaftClientReply( + ClientId.randomId(), raftId, 1L, true, Message.EMPTY, + null, 1L, null)); + Assert.assertTrue(response.isSuccess()); + Assert.assertEquals(Message.EMPTY, + SCMRatisResponse.encode(response.getResult())); + } + + @Test + public void testDecodeOperationFailureWithException() throws Exception { + SCMRatisResponse response = SCMRatisResponse.decode(new RaftClientReply( + ClientId.randomId(), raftId, 1L, false, Message.EMPTY, + new LeaderNotReadyException(raftId), 1L, null)); + Assert.assertFalse(response.isSuccess()); + Assert.assertTrue(response.getException() instanceof RaftException); + Assert.assertNull(response.getResult()); + } + + @Test(expected = InvalidProtocolBufferException.class) + public void testEncodeFailureWithNonProto() throws Exception { + // Non proto input + Message message = Message.valueOf("test"); + // Should fail with exception. + SCMRatisResponse.encode(message); + } +} From 7287e1dfc2e00d9eefaec3b046be2a583cacbfed Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Sun, 28 Jun 2020 18:06:27 +0800 Subject: [PATCH 12/51] HDDS-3651 Add tests for SCMRatisRequest. (#1112) --- .../hadoop/hdds/scm/ha/SCMRatisRequest.java | 1 + .../hdds/scm/ha/TestSCMRatisRequest.java | 63 +++++++++++++++++++ 2 files changed, 64 insertions(+) create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java index 3e516537a695..fbba4d0b62ff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisRequest.java @@ -106,6 +106,7 @@ public Message encode() throws InvalidProtocolBufferException { args.add(argBuilder.build()); } methodBuilder.addAllArgs(args); + requestProtoBuilder.setMethod(methodBuilder.build()); return Message.valueOf( org.apache.ratis.thirdparty.com.google.protobuf.ByteString.copyFrom( requestProtoBuilder.build().toByteArray())); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java new file mode 100644 index 000000000000..52d2ff3245c3 --- /dev/null +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisRequest.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.ha; + +import com.google.protobuf.InvalidProtocolBufferException; +import org.apache.hadoop.hdds.scm.pipeline.PipelineID; +import org.apache.ratis.protocol.Message; +import org.junit.Assert; +import org.junit.Test; + +import static org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType.PIPELINE; + +/** + * Test for SCMRatisRequest. + */ +public class TestSCMRatisRequest { + + @Test + public void testEncodeAndDecodeSuccess() throws Exception { + PipelineID pipelineID = PipelineID.randomId(); + Object[] args = new Object[] {pipelineID.getProtobuf()}; + String operation = "test"; + SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, operation, args); + Assert.assertEquals(operation, + SCMRatisRequest.decode(request.encode()).getOperation()); + Assert.assertEquals(args[0], + SCMRatisRequest.decode(request.encode()).getArguments()[0]); + } + + @Test(expected = InvalidProtocolBufferException.class) + public void testEncodeWithNonProto() throws Exception{ + PipelineID pipelineID = PipelineID.randomId(); + // Non proto args + Object[] args = new Object[] {pipelineID}; + SCMRatisRequest request = SCMRatisRequest.of(PIPELINE, "test", args); + // Should throw exception there. + request.encode(); + } + + @Test(expected = InvalidProtocolBufferException.class) + public void testDecodeWithNonProto() throws Exception { + // Non proto message + Message message = Message.valueOf("randomMessage"); + // Should throw exception there. + SCMRatisRequest.decode(message); + } +} From 144f9a83390a60cf66a6c7f148218ebaf95db7ef Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 3 Jul 2020 04:08:44 +0200 Subject: [PATCH 13/51] HDDS-3911. Compile error in acceptance test on HDDS-2823 (#1157) --- .../src/main/proto/SCMRatisProtocol.proto | 0 hadoop-hdds/server-scm/pom.xml | 26 ------------------- 2 files changed, 26 deletions(-) rename hadoop-hdds/{server-scm => interface-server}/src/main/proto/SCMRatisProtocol.proto (100%) diff --git a/hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto similarity index 100% rename from hadoop-hdds/server-scm/src/main/proto/SCMRatisProtocol.proto rename to hadoop-hdds/interface-server/src/main/proto/SCMRatisProtocol.proto diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 8c17aaef4566..92b563f1ade2 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -168,32 +168,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.hadoop - hadoop-maven-plugins - - - compile-protoc - - protoc - - - ${protobuf.version} - - - ${basedir}/src/main/proto - - - - ${basedir}/src/main/proto - - SCMRatisProtocol.proto - - - - - - com.github.spotbugs spotbugs-maven-plugin From 565dabc9237a646a8ddb9c9aa4784c99ed7a56c0 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Sat, 11 Jul 2020 06:45:21 +0800 Subject: [PATCH 14/51] HDDS-3662 Decouple finalizeAndDestroyPipeline. (#1049) --- .../hadoop/hdds/scm/node/DeadNodeHandler.java | 2 +- .../hdds/scm/node/StaleNodeHandler.java | 2 +- .../scm/pipeline/PipelineActionHandler.java | 4 +- .../hdds/scm/pipeline/PipelineManager.java | 3 +- .../scm/pipeline/PipelineManagerV2Impl.java | 160 +++++++++--------- .../hdds/scm/pipeline/SCMPipelineManager.java | 141 ++++++++------- .../scm/server/SCMClientProtocolServer.java | 2 +- .../hdds/scm/block/TestBlockManager.java | 2 +- .../scm/pipeline/MockPipelineManager.java | 3 +- .../scm/pipeline/TestPipelineManagerImpl.java | 6 +- .../scm/pipeline/TestSCMPipelineManager.java | 6 +- .../scm/pipeline/TestNode2PipelineMap.java | 2 +- .../hdds/scm/pipeline/TestPipelineClose.java | 4 +- .../TestRatisPipelineCreateAndDestroy.java | 6 +- .../hadoop/ozone/container/TestHelper.java | 4 +- .../TestCloseContainerByPipeline.java | 4 +- .../freon/TestFreonWithDatanodeRestart.java | 1 + .../freon/TestFreonWithPipelineDestroy.java | 2 +- .../ozone/recon/TestReconAsPassiveScm.java | 2 +- ...estSCMContainerPlacementPolicyMetrics.java | 2 +- .../scm/pipeline/TestSCMPipelineMetrics.java | 3 +- .../ozone/recon/scm/ReconPipelineManager.java | 9 +- 22 files changed, 180 insertions(+), 190 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java index 17e1fedd9525..fde228691cf9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DeadNodeHandler.java @@ -98,7 +98,7 @@ private void destroyPipelines(final DatanodeDetails datanodeDetails) { .ifPresent(pipelines -> pipelines.forEach(id -> { try { - pipelineManager.finalizeAndDestroyPipeline( + pipelineManager.closePipeline( pipelineManager.getPipeline(id), false); } catch (PipelineNotFoundException ignore) { // Pipeline is not there in pipeline manager, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java index 5530e7305e08..dd8cea366975 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java @@ -60,7 +60,7 @@ public void onMessage(DatanodeDetails datanodeDetails, for (PipelineID pipelineID : pipelineIds) { try { Pipeline pipeline = pipelineManager.getPipeline(pipelineID); - pipelineManager.finalizeAndDestroyPipeline(pipeline, true); + pipelineManager.closePipeline(pipeline, true); } catch (IOException e) { LOG.info("Could not finalize pipeline={} for dn={}", pipelineID, datanodeDetails); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java index 07206943e689..e719adbf057b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java @@ -79,8 +79,8 @@ private void processPipelineAction(final DatanodeDetails datanode, info.getDetailedReason()); if (action == PipelineAction.Action.CLOSE) { - pipelineManager.finalizeAndDestroyPipeline( - pipelineManager.getPipeline(pid), true); + pipelineManager.closePipeline( + pipelineManager.getPipeline(pid), false); } else { LOG.error("unknown pipeline action:{}", action); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index 48068d82fe56..02e195ffe7f1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -77,8 +77,7 @@ NavigableSet getContainersInPipeline(PipelineID pipelineID) void openPipeline(PipelineID pipelineId) throws IOException; - void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) - throws IOException; + void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException; void scrubPipeline(ReplicationType type, ReplicationFactor factor) throws IOException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 3732add07927..85654aa03164 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.ContainerID; +import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; @@ -47,11 +48,11 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; /** * SCM Pipeline Manager implementation. @@ -68,6 +69,7 @@ public final class PipelineManagerV2Impl implements PipelineManager { private Scheduler scheduler; private BackgroundPipelineCreator backgroundPipelineCreator; private final ConfigurationSource conf; + private final EventPublisher eventPublisher; // Pipeline Manager MXBean private ObjectName pmInfoBean; private final SCMPipelineMetrics metrics; @@ -80,11 +82,13 @@ public final class PipelineManagerV2Impl implements PipelineManager { private PipelineManagerV2Impl(ConfigurationSource conf, NodeManager nodeManager, StateManager pipelineStateManager, - PipelineFactory pipelineFactory) { + PipelineFactory pipelineFactory, + EventPublisher eventPublisher) { this.lock = new ReentrantReadWriteLock(); this.pipelineFactory = pipelineFactory; this.stateManager = pipelineStateManager; this.conf = conf; + this.eventPublisher = eventPublisher; this.pmInfoBean = MBeans.register("SCMPipelineManager", "SCMPipelineManagerInfo", this); this.metrics = SCMPipelineMetrics.create(); @@ -116,7 +120,7 @@ public static PipelineManagerV2Impl newPipelineManager( nodeManager, stateManager, conf, eventPublisher); // Create PipelineManager PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf, - nodeManager, stateManager, pipelineFactory); + nodeManager, stateManager, pipelineFactory, eventPublisher); // Create background thread. Scheduler scheduler = new Scheduler( @@ -157,7 +161,14 @@ public Pipeline createPipeline(ReplicationType type, @Override public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, List nodes) { - return null; + // This will mostly be used to create dummy pipeline for SimplePipelines. + // We don't update the metrics for SimplePipelines. + lock.writeLock().lock(); + try { + return pipelineFactory.create(type, factor, nodes); + } finally { + lock.writeLock().unlock(); + } } @Override @@ -310,97 +321,77 @@ public void openPipeline(PipelineID pipelineId) throws IOException { } /** - * Finalizes pipeline in the SCM. Removes pipeline and makes rpc call to - * destroy pipeline on the datanodes immediately or after timeout based on the - * value of onTimeout parameter. - * - * @param pipeline - Pipeline to be destroyed - * @param onTimeout - if true pipeline is removed and destroyed on - * datanodes after timeout - * @throws IOException - */ - @Override - public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) - throws IOException { - LOG.info("Destroying pipeline:{}", pipeline); - finalizePipeline(pipeline.getId()); - if (onTimeout) { - long pipelineDestroyTimeoutInMillis = - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, - ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - scheduler.schedule(() -> destroyPipeline(pipeline), - pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG, - String.format("Destroy pipeline failed for pipeline:%s", pipeline)); - } else { - destroyPipeline(pipeline); - } - } - - /** - * Moves the pipeline to CLOSED state and sends close container command for - * all the containers in the pipeline. + * Removes the pipeline from the db and pipeline state map. * - * @param pipelineId - ID of the pipeline to be moved to CLOSED state. + * @param pipeline - pipeline to be removed * @throws IOException */ - private void finalizePipeline(PipelineID pipelineId) throws IOException { + protected void removePipeline(Pipeline pipeline) throws IOException { + pipelineFactory.close(pipeline.getType(), pipeline); + PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); try { - Pipeline pipeline = stateManager.getPipeline(pipelineId); - if (!pipeline.isClosed()) { - stateManager.updatePipelineState( - pipelineId.getProtobuf(), HddsProtos.PipelineState.PIPELINE_CLOSED); - LOG.info("Pipeline {} moved to CLOSED state", pipeline); - } - - // TODO fire events to datanodes for closing pipelines -// Set containerIDs = stateManager.getContainers(pipelineId); -// for (ContainerID containerID : containerIDs) { -// eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); -// } - metrics.removePipelineMetrics(pipelineId); + stateManager.removePipeline(pipelineID.getProtobuf()); + metrics.incNumPipelineDestroyed(); + } catch (IOException ex) { + metrics.incNumPipelineDestroyFailed(); + throw ex; } finally { lock.writeLock().unlock(); } } /** - * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all - * the datanodes for ratis pipelines. - * - * @param pipeline - Pipeline to be destroyed + * Fire events to close all containers related to the input pipeline. + * @param pipelineId - ID of the pipeline. * @throws IOException */ - protected void destroyPipeline(Pipeline pipeline) throws IOException { - pipelineFactory.close(pipeline.getType(), pipeline); - // remove the pipeline from the pipeline manager - removePipeline(pipeline.getId()); - triggerPipelineCreation(); + protected void closeContainersForPipeline(final PipelineID pipelineId) + throws IOException { + Set containerIDs = stateManager.getContainers(pipelineId); + for (ContainerID containerID : containerIDs) { + eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); + } } /** - * Removes the pipeline from the db and pipeline state map. - * - * @param pipelineId - ID of the pipeline to be removed + * put pipeline in CLOSED state. + * @param pipeline - ID of the pipeline. + * @param onTimeout - whether to remove pipeline after some time. * @throws IOException */ - protected void removePipeline(PipelineID pipelineId) throws IOException { + @Override + public void closePipeline(Pipeline pipeline, boolean onTimeout) + throws IOException { + PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); try { - stateManager.removePipeline(pipelineId.getProtobuf()); - metrics.incNumPipelineDestroyed(); - } catch (IOException ex) { - metrics.incNumPipelineDestroyFailed(); - throw ex; + if (!pipeline.isClosed()) { + stateManager.updatePipelineState(pipelineID.getProtobuf(), + HddsProtos.PipelineState.PIPELINE_CLOSED); + LOG.info("Pipeline {} moved to CLOSED state", pipeline); + } + metrics.removePipelineMetrics(pipelineID); } finally { lock.writeLock().unlock(); } + // close containers. + closeContainersForPipeline(pipelineID); + if (!onTimeout) { + // close pipeline right away. + removePipeline(pipeline); + } } + /** + * Scrub pipelines. + * @param type Pipeline type + * @param factor Pipeline factor + * @throws IOException + */ @Override public void scrubPipeline(ReplicationType type, ReplicationFactor factor) - throws IOException{ + throws IOException { if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) { // Only srub pipeline for RATIS THREE pipeline return; @@ -410,18 +401,29 @@ public void scrubPipeline(ReplicationType type, ReplicationFactor factor) ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - List needToSrubPipelines = stateManager.getPipelines(type, factor, - Pipeline.PipelineState.ALLOCATED).stream() - .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp() - .toEpochMilli() >= pipelineScrubTimeoutInMills) - .collect(Collectors.toList()); - for (Pipeline p : needToSrubPipelines) { - LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + - " since it stays at ALLOCATED stage for " + - Duration.between(currentTime, p.getCreationTimestamp()).toMinutes() + - " mins."); - finalizeAndDestroyPipeline(p, false); + + List candidates = stateManager.getPipelines(type, factor); + + for (Pipeline p : candidates) { + // scrub pipelines who stay ALLOCATED for too long. + if (p.getPipelineState() == Pipeline.PipelineState.ALLOCATED && + (currentTime.toEpochMilli() - p.getCreationTimestamp() + .toEpochMilli() >= pipelineScrubTimeoutInMills)) { + LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + + " since it stays at ALLOCATED stage for " + + Duration.between(currentTime, p.getCreationTimestamp()) + .toMinutes() + " mins."); + closePipeline(p, false); + } + // scrub pipelines who stay CLOSED for too long. + if (p.getPipelineState() == Pipeline.PipelineState.CLOSED) { + LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + + " since it stays at CLOSED stage."); + closeContainersForPipeline(p.getId()); + removePipeline(p); + } } + return; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java index 50754dbd808d..365d8ee20239 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java @@ -32,7 +32,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.stream.Collectors; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -382,33 +381,53 @@ public void openPipeline(PipelineID pipelineId) throws IOException { } /** - * Finalizes pipeline in the SCM. Removes pipeline and makes rpc call to - * destroy pipeline on the datanodes immediately or after timeout based on the - * value of onTimeout parameter. - * - * @param pipeline - Pipeline to be destroyed - * @param onTimeout - if true pipeline is removed and destroyed on - * datanodes after timeout + * Fire events to close all containers related to the input pipeline. + * @param pipelineId - ID of the pipeline. + * @throws IOException + */ + protected void closeContainersForPipeline(final PipelineID pipelineId) + throws IOException { + Set containerIDs = stateManager.getContainers(pipelineId); + for (ContainerID containerID : containerIDs) { + eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); + } + } + + /** + * put pipeline in CLOSED state. + * @param pipeline - ID of the pipeline. + * @param onTimeout - whether to remove pipeline after some time. * @throws IOException */ @Override - public void finalizeAndDestroyPipeline(Pipeline pipeline, boolean onTimeout) + public void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException { - LOG.info("Destroying pipeline:{}", pipeline); - finalizePipeline(pipeline.getId()); - if (onTimeout) { - long pipelineDestroyTimeoutInMillis = - conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, - ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT_DEFAULT, - TimeUnit.MILLISECONDS); - scheduler.schedule(() -> destroyPipeline(pipeline), - pipelineDestroyTimeoutInMillis, TimeUnit.MILLISECONDS, LOG, - String.format("Destroy pipeline failed for pipeline:%s", pipeline)); - } else { - destroyPipeline(pipeline); + PipelineID pipelineID = pipeline.getId(); + lock.writeLock().lock(); + try { + if (!pipeline.isClosed()) { + stateManager.updatePipelineState(pipelineID, + Pipeline.PipelineState.CLOSED); + LOG.info("Pipeline {} moved to CLOSED state", pipeline); + } + metrics.removePipelineMetrics(pipelineID); + } finally { + lock.writeLock().unlock(); + } + // close containers. + closeContainersForPipeline(pipelineID); + if (!onTimeout) { + // close pipeline right away. + removePipeline(pipeline); } } + /** + * Scrub pipelines. + * @param type Pipeline type + * @param factor Pipeline factor + * @throws IOException + */ @Override public void scrubPipeline(ReplicationType type, ReplicationFactor factor) throws IOException{ @@ -421,18 +440,29 @@ public void scrubPipeline(ReplicationType type, ReplicationFactor factor) ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS); - List needToSrubPipelines = stateManager.getPipelines(type, factor, - Pipeline.PipelineState.ALLOCATED).stream() - .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp() - .toEpochMilli() >= pipelineScrubTimeoutInMills) - .collect(Collectors.toList()); - for (Pipeline p : needToSrubPipelines) { - LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + - " since it stays at ALLOCATED stage for " + - Duration.between(currentTime, p.getCreationTimestamp()).toMinutes() + - " mins."); - finalizeAndDestroyPipeline(p, false); + + List candidates = stateManager.getPipelines(type, factor); + + for (Pipeline p : candidates) { + // scrub pipelines who stay ALLOCATED for too long. + if (p.getPipelineState() == Pipeline.PipelineState.ALLOCATED && + (currentTime.toEpochMilli() - p.getCreationTimestamp() + .toEpochMilli() >= pipelineScrubTimeoutInMills)) { + LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + + " since it stays at ALLOCATED stage for " + + Duration.between(currentTime, p.getCreationTimestamp()) + .toMinutes() + " mins."); + closePipeline(p, false); + } + // scrub pipelines who stay CLOSED for too long. + if (p.getPipelineState() == Pipeline.PipelineState.CLOSED) { + LOG.info("Scrubbing pipeline: id: " + p.getId().toString() + + " since it is at CLOSED stage."); + closeContainersForPipeline(p.getId()); + removePipeline(p); + } } + return; } @Override @@ -527,54 +557,21 @@ public void waitPipelineReady(PipelineID pipelineID, long timeout) } } - /** - * Moves the pipeline to CLOSED state and sends close container command for - * all the containers in the pipeline. - * - * @param pipelineId - ID of the pipeline to be moved to CLOSED state. - * @throws IOException - */ - private void finalizePipeline(PipelineID pipelineId) throws IOException { - lock.writeLock().lock(); - try { - stateManager.finalizePipeline(pipelineId); - Set containerIDs = stateManager.getContainers(pipelineId); - for (ContainerID containerID : containerIDs) { - eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); - } - metrics.removePipelineMetrics(pipelineId); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * Removes pipeline from SCM. Sends ratis command to destroy pipeline on all - * the datanodes for ratis pipelines. - * - * @param pipeline - Pipeline to be destroyed - * @throws IOException - */ - protected void destroyPipeline(Pipeline pipeline) throws IOException { - pipelineFactory.close(pipeline.getType(), pipeline); - // remove the pipeline from the pipeline manager - removePipeline(pipeline.getId()); - triggerPipelineCreation(); - } - /** * Removes the pipeline from the db and pipeline state map. * - * @param pipelineId - ID of the pipeline to be removed + * @param pipeline - pipeline to be removed * @throws IOException */ - protected void removePipeline(PipelineID pipelineId) throws IOException { + protected void removePipeline(Pipeline pipeline) throws IOException { + pipelineFactory.close(pipeline.getType(), pipeline); + PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); try { if (pipelineStore != null) { - pipelineStore.delete(pipelineId); - Pipeline pipeline = stateManager.removePipeline(pipelineId); - nodeManager.removePipeline(pipeline); + pipelineStore.delete(pipelineID); + Pipeline pipelineRemoved = stateManager.removePipeline(pipelineID); + nodeManager.removePipeline(pipelineRemoved); metrics.incNumPipelineDestroyed(); } } catch (IOException ex) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index df5c1471de23..ede679d4d2a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -475,7 +475,7 @@ public void closePipeline(HddsProtos.PipelineID pipelineID) PipelineManager pipelineManager = scm.getPipelineManager(); Pipeline pipeline = pipelineManager.getPipeline(PipelineID.getFromProtobuf(pipelineID)); - pipelineManager.finalizeAndDestroyPipeline(pipeline, true); + pipelineManager.closePipeline(pipeline, true); AUDIT.logWriteSuccess( buildAuditMessageForSuccess(SCMAction.CLOSE_PIPELINE, null) ); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index c89065c15a1c..cf6f0ed735ff 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -417,7 +417,7 @@ public void testMultipleBlockAllocationWithClosedContainer() public void testBlockAllocationWithNoAvailablePipelines() throws IOException, TimeoutException, InterruptedException { for (Pipeline pipeline : pipelineManager.getPipelines()) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } Assert.assertEquals(0, pipelineManager.getPipelines(type, factor).size()); Assert.assertNotNull(blockManager diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java index 5dd60824838b..6292ad44376a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java @@ -165,8 +165,7 @@ public void openPipeline(final PipelineID pipelineId) } @Override - public void finalizeAndDestroyPipeline(final Pipeline pipeline, - final boolean onTimeout) + public void closePipeline(final Pipeline pipeline, final boolean onTimeout) throws IOException { stateManager.finalizePipeline(pipeline.getId()); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index f8eeb6e48f8d..d5292e3153c0 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -185,7 +185,7 @@ public void testRemovePipeline() throws Exception { Pipeline.PipelineState.OPEN).contains(pipeline)); try { - pipelineManager.removePipeline(pipeline.getId()); + pipelineManager.removePipeline(pipeline); fail(); } catch (IOException ioe) { // Should not be able to remove the OPEN pipeline. @@ -195,7 +195,7 @@ public void testRemovePipeline() throws Exception { } // Destroy pipeline - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); try { pipelineManager.getPipeline(pipeline.getId()); fail("Pipeline should not have been retrieved"); @@ -238,7 +238,7 @@ public void testPipelineReport() throws Exception { .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen()); // close the pipeline - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); // pipeline report for destroyed pipeline should be ignored nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 7c2f17e85840..08c1a20f567f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -140,7 +140,7 @@ public void testPipelineReload() throws IOException { // clean up for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } pipelineManager.close(); } @@ -163,7 +163,7 @@ public void testRemovePipeline() throws IOException { pipelineManager.openPipeline(pipeline.getId()); pipelineManager .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); pipelineManager.close(); // new pipeline manager should not be able to load removed pipelines @@ -228,7 +228,7 @@ public void testPipelineReport() throws IOException { .assertTrue(pipelineManager.getPipeline(pipeline.getId()).isOpen()); // close the pipeline - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); // pipeline report for destroyed pipeline should be ignored nodes.subList(0, 2).forEach(dn -> sendPipelineReport(dn, pipeline, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java index 42acb12489f5..532f40035110 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestNode2PipelineMap.java @@ -121,7 +121,7 @@ public void testPipelineMap() throws IOException { Assert.assertEquals(0, set2.size()); pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); + .closePipeline(ratisContainer.getPipeline(), false); pipelines = scm.getScmNodeManager() .getPipelines(dns.get(0)); Assert diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java index 346fc0e7aa75..0ee0101f7b3e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineClose.java @@ -137,7 +137,7 @@ public void testPipelineCloseWithClosedContainer() throws IOException { Assert.assertEquals(0, setClosed.size()); pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); + .closePipeline(ratisContainer.getPipeline(), false); for (DatanodeDetails dn : ratisContainer.getPipeline().getNodes()) { // Assert that the pipeline has been removed from Node2PipelineMap as well Assert.assertFalse(scm.getScmNodeManager().getPipelines(dn) @@ -153,7 +153,7 @@ public void testPipelineCloseWithOpenContainer() Assert.assertEquals(1, setOpen.size()); pipelineManager - .finalizeAndDestroyPipeline(ratisContainer.getPipeline(), false); + .closePipeline(ratisContainer.getPipeline(), false); GenericTestUtils.waitFor(() -> { try { return containerManager diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java index bd677db65f65..08a29f28623a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java @@ -86,7 +86,7 @@ public void testAutomaticPipelineCreationOnPipelineDestroy() .getPipelines(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } // make sure two pipelines are created waitForPipelines(2); @@ -108,7 +108,7 @@ public void testAutomaticPipelineCreationDisablingFactorONE() .getPipelines(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, Pipeline.PipelineState.OPEN); for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } // make sure two pipelines are created @@ -152,7 +152,7 @@ public void testPipelineCreationOnNodeRestart() throws Exception { // destroy the existing pipelines for (Pipeline pipeline : pipelines) { - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } if (cluster.getStorageContainerManager() diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java index 12ffce6c2411..fab2ea387d34 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java @@ -206,8 +206,8 @@ public static void waitForPipelineClose(List pipelineList, throws TimeoutException, InterruptedException, IOException { for (Pipeline pipeline1 : pipelineList) { // issue pipeline destroy command - cluster.getStorageContainerManager().getPipelineManager() - .finalizeAndDestroyPipeline(pipeline1, false); + cluster.getStorageContainerManager() + .getPipelineManager().closePipeline(pipeline1, false); } // wait for the pipeline to get destroyed in the datanodes diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index bb2d57f22240..6b40179d1ef8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -204,7 +204,7 @@ public void testCloseContainerViaStandAlone() Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); cluster.getStorageContainerManager().getPipelineManager() - .finalizeAndDestroyPipeline(pipeline, false); + .closePipeline(pipeline, false); Thread.sleep(5000); // Pipeline close should not affect a container in CLOSED state Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails)); @@ -307,7 +307,7 @@ public void testQuasiCloseTransitionViaRatis() // close the pipeline cluster.getStorageContainerManager() - .getPipelineManager().finalizeAndDestroyPipeline(pipeline, false); + .getPipelineManager().closePipeline(pipeline, false); // All the containers in OPEN or CLOSING state should transition to // QUASI-CLOSED after pipeline close diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java index fa2536195195..feb03ec5c02d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java @@ -85,6 +85,7 @@ public static void init() throws Exception { RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." + "watch.request.timeout", 3, TimeUnit.SECONDS); + conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5); cluster = MiniOzoneCluster.newBuilder(conf) .setHbProcessorInterval(1000) .setHbInterval(1000) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java index 5150fd4d8f2f..109a49e4c046 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithPipelineDestroy.java @@ -105,6 +105,6 @@ private void destroyPipeline() throws Exception { PipelineManager pipelineManager = cluster.getStorageContainerManager().getPipelineManager(); Pipeline pipeline = pipelineManager.getPipeline(id); - pipelineManager.finalizeAndDestroyPipeline(pipeline, false); + pipelineManager.closePipeline(pipeline, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java index 6e3dfe3f8b19..9092cc5d42f1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java @@ -181,7 +181,7 @@ public void testReconRestart() throws Exception { .filter(p -> !p.getId().equals(containerInfo.getPipelineID())) .findFirst(); assertTrue(pipelineToClose.isPresent()); - scmPipelineManager.finalizeAndDestroyPipeline(pipelineToClose.get(), false); + scmPipelineManager.closePipeline(pipelineToClose.get(), false); // Start Recon cluster.startRecon(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java index 4025acac439f..5edd392b5f5a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMContainerPlacementPolicyMetrics.java @@ -124,7 +124,7 @@ public void test() throws IOException { .collect(Collectors.toList()); Pipeline targetPipeline = pipelines.get(0); List nodes = targetPipeline.getNodes(); - manager.finalizeAndDestroyPipeline(pipelines.get(0), true); + manager.closePipeline(pipelines.get(0), true); // kill datanode to trigger under-replicated container replication cluster.shutdownHddsDatanode(nodes.get(0)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java index 250a2b097e58..a1a816dce49b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/pipeline/TestSCMPipelineMetrics.java @@ -92,8 +92,7 @@ public void testPipelineDestroy() { try { cluster.getStorageContainerManager() .getPipelineManager() - .finalizeAndDestroyPipeline( - pipeline.get(), false); + .closePipeline(pipeline.get(), false); } catch (IOException e) { e.printStackTrace(); Assert.fail(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java index beed59195c90..a96212df15ea 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconPipelineManager.java @@ -62,13 +62,6 @@ public void triggerPipelineCreation() { // Don't do anything in Recon. } - @Override - protected void destroyPipeline(Pipeline pipeline) throws IOException { - // remove the pipeline from the pipeline manager - removePipeline(pipeline.getId()); - } - - /** * Bootstrap Recon's pipeline metadata with that from SCM. * @param pipelinesFromScm pipelines from SCM. @@ -124,7 +117,7 @@ public void removeInvalidPipelines(List pipelinesFromScm) { } try { LOG.info("Removing invalid pipeline {} from Recon.", pipelineID); - finalizeAndDestroyPipeline(p, false); + closePipeline(p, false); } catch (IOException e) { LOG.warn("Unable to remove pipeline {}", pipelineID, e); } From 8a8c9eb41809409411251cc999a035f8c4cf2fa2 Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Wed, 15 Jul 2020 01:33:51 +0800 Subject: [PATCH 15/51] HDDS-3191: switch from SCMPipelineManager to PipelineManagerV2Impl (#1151) --- .../hdds/scm/ha/SCMHAConfiguration.java | 14 ++-- .../scm/metadata/SCMMetadataStoreImpl.java | 4 + .../scm/pipeline/PipelineManagerV2Impl.java | 14 +++- .../hdds/scm/server/SCMConfigurator.java | 22 ++++- .../scm/server/StorageContainerManager.java | 26 +++++- .../apache/hadoop/hdds/scm/HddsTestUtils.java | 25 ------ .../org/apache/hadoop/hdds/scm/TestUtils.java | 21 ++++- .../hdds/scm/block/TestBlockManager.java | 18 +++- .../TestCloseContainerEventHandler.java | 14 +++- .../container/TestContainerStateManager.java | 3 +- .../container/TestSCMContainerManager.java | 14 ++-- .../hdds/scm/node/TestContainerPlacement.java | 12 ++- .../hdds/scm/node/TestDeadNodeHandler.java | 9 +- .../hdds/scm/node/TestSCMNodeManager.java | 3 +- .../hdds/scm/node/TestStatisticsUpdate.java | 3 +- .../pipeline/MockRatisPipelineProvider.java | 23 ++--- .../scm/pipeline/TestPipelineManagerImpl.java | 7 +- .../TestHealthyPipelineSafeModeRule.java | 53 ++++++++++-- .../TestOneReplicaPipelineSafeModeRule.java | 15 ++-- .../scm/safemode/TestSCMSafeModeManager.java | 84 ++++++++++++++----- .../server/TestSCMBlockProtocolServer.java | 2 + .../scm/server/ratis/TestSCMRatisServer.java | 4 +- .../scm/server/ratis/TestSCMStateMachine.java | 4 +- .../hadoop/ozone/MiniOzoneClusterImpl.java | 5 +- .../hadoop/ozone/TestSecureOzoneCluster.java | 20 ++--- .../ozone/TestStorageContainerManager.java | 6 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 2 + .../ozone/om/TestOzoneManagerHAWithData.java | 10 +-- 28 files changed, 295 insertions(+), 142 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java index 1cb8c65675f8..5fbf2688b1aa 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAConfiguration.java @@ -53,7 +53,7 @@ public class SCMHAConfiguration { private String ratisBindHost = "0.0.0.0"; @Config(key = "ratis.bind.port", - type = ConfigType.STRING, + type = ConfigType.INT, defaultValue = "9865", tags = {OZONE, SCM, HA, RATIS}, description = "Port used by SCM for Ratis Server." @@ -78,7 +78,7 @@ public class SCMHAConfiguration { description = "The size of the raft segment used by Apache Ratis on" + " SCM. (16 KB by default)" ) - private long raftSegmentSize = 16L * 1024L; + private double raftSegmentSize = 16L * 1024L; @Config(key = "ratis.segment.preallocated.size", type = ConfigType.SIZE, @@ -87,7 +87,7 @@ public class SCMHAConfiguration { description = "The size of the buffer which is preallocated for" + " raft segment used by Apache Ratis on SCM.(16 KB by default)" ) - private long raftSegmentPreAllocatedSize = 16 * 1024; + private double raftSegmentPreAllocatedSize = 16 * 1024; @Config(key = "ratis.log.appender.queue.num-elements", type = ConfigType.INT, @@ -103,7 +103,7 @@ public class SCMHAConfiguration { tags = {SCM, OZONE, HA, RATIS}, description = "Byte limit for Raft's Log Worker queue." ) - private int raftLogAppenderQueueByteLimit = 32 * 1024 * 1024; + private double raftLogAppenderQueueByteLimit = 32 * 1024 * 1024; @Config(key = "ratis.log.purge.gap", type = ConfigType.INT, @@ -174,11 +174,11 @@ public String getRatisRpcType() { } public long getRaftSegmentSize() { - return raftSegmentSize; + return (long)raftSegmentSize; } public long getRaftSegmentPreAllocatedSize() { - return raftSegmentPreAllocatedSize; + return (long)raftSegmentPreAllocatedSize; } public int getRaftLogAppenderQueueNum() { @@ -186,7 +186,7 @@ public int getRaftLogAppenderQueueNum() { } public int getRaftLogAppenderQueueByteLimit() { - return raftLogAppenderQueueByteLimit; + return (int)raftLogAppenderQueueByteLimit; } public int getRaftLogPurgeGap() { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java index 4ab545776080..0a609c7a0f4e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreImpl.java @@ -102,7 +102,11 @@ public void start(OzoneConfiguration config) pipelineTable = PIPELINES.getTable(store); + checkTableStatus(pipelineTable, PIPELINES.getName()); + containerTable = CONTAINERS.getTable(store); + + checkTableStatus(containerTable, CONTAINERS.getName()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 85654aa03164..12417457e133 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -160,7 +160,7 @@ public Pipeline createPipeline(ReplicationType type, @Override public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor, - List nodes) { + List nodes) { // This will mostly be used to create dummy pipeline for SimplePipelines. // We don't update the metrics for SimplePipelines. lock.writeLock().lock(); @@ -207,6 +207,7 @@ public List getPipelines() { @Override public List getPipelines(ReplicationType type) { + lock.readLock().lock(); try { return stateManager.getPipelines(type); } finally { @@ -582,6 +583,17 @@ public void allowPipelineCreation() { this.pipelineCreationAllowed.set(true); } + @VisibleForTesting + public void setPipelineProvider(ReplicationType replicationType, + PipelineProvider provider) { + pipelineFactory.setProvider(replicationType, provider); + } + + @VisibleForTesting + public StateManager getStateManager() { + return stateManager; + } + private void setBackgroundPipelineCreator( BackgroundPipelineCreator backgroundPipelineCreator) { this.backgroundPipelineCreator = backgroundPipelineCreator; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java index 9bbabd11ee0f..d9b511da2631 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMConfigurator.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.scm.block.BlockManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -51,7 +52,8 @@ * ReplicationManager replicationManager; * SCMSafeModeManager scmSafeModeManager; * CertificateServer certificateServer; - * SCMMetadata scmMetadataStore. + * SCMMetadata scmMetadataStore; + * SCMHAManager scmHAManager. * * If any of these are *not* specified then the default version of these * managers are used by SCM. @@ -67,6 +69,7 @@ public final class SCMConfigurator { private CertificateServer certificateServer; private SCMMetadataStore metadataStore; private NetworkTopology networkTopology; + private SCMHAManager scmHAManager; /** * Allows user to specify a version of Node manager to use with this SCM. @@ -148,6 +151,15 @@ public void setNetworkTopology(NetworkTopology networkTopology) { this.networkTopology = networkTopology; } + /** + * Allows user to specify a custom version of SCMHAManager to be + * used with this SCM. + * @param scmHaMgr - SCMHAManager. + */ + public void setSCMHAManager(SCMHAManager scmHaMgr) { + this.scmHAManager = scmHaMgr; + } + /** * Gets SCM Node Manager. * @return Node Manager. @@ -219,4 +231,12 @@ public SCMMetadataStore getMetadataStore() { public NetworkTopology getNetworkTopology() { return networkTopology; } + + /** + * Get SCMHAManager. + * @return SCMHAManager. + */ + public SCMHAManager getSCMHAManager() { + return scmHAManager; + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 20d0480a428b..4ba40f960a45 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -51,6 +51,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState; import org.apache.hadoop.hdds.scm.PlacementPolicy; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; @@ -94,7 +96,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.x509.SecurityConfig; @@ -170,6 +172,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private final SCMStorageConfig scmStorageConfig; private SCMMetadataStore scmMetadataStore; + private SCMHAManager scmHAManager; private final EventQueue eventQueue; /* @@ -237,7 +240,7 @@ private StorageContainerManager(OzoneConfiguration conf) * @param configurator - configurator */ private StorageContainerManager(OzoneConfiguration conf, - SCMConfigurator configurator) + SCMConfigurator configurator) throws IOException, AuthenticationException { super(HddsVersionInfo.HDDS_VERSION_INFO); @@ -439,6 +442,12 @@ private void initializeSystemManagers(OzoneConfiguration conf, clusterMap = new NetworkTopologyImpl(conf); } + if (configurator.getSCMHAManager() != null) { + scmHAManager = configurator.getSCMHAManager(); + } else { + scmHAManager = new SCMHAManagerImpl(conf); + } + if(configurator.getScmNodeManager() != null) { scmNodeManager = configurator.getScmNodeManager(); } else { @@ -455,7 +464,10 @@ private void initializeSystemManagers(OzoneConfiguration conf, pipelineManager = configurator.getPipelineManager(); } else { pipelineManager = - new SCMPipelineManager(conf, scmNodeManager, + PipelineManagerV2Impl.newPipelineManager( + conf, + scmHAManager, + scmNodeManager, scmMetadataStore.getPipelineTable(), eventQueue); } @@ -825,6 +837,8 @@ public void start() throws IOException { scmRatisServer.start(); } + scmHAManager.start(); + ms = HddsServerUtil .initializeMetrics(configuration, "StorageContainerManager"); @@ -957,6 +971,12 @@ public void stop() { ms.stop(); } + try { + scmHAManager.shutdown(); + } catch (Exception ex) { + LOG.error("SCM HA Manager stop failed", ex); + } + scmSafeModeManager.stop(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java index 4039b5a68f24..d4d11ffd6218 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java @@ -17,23 +17,16 @@ */ package org.apache.hadoop.hdds.scm; -import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.UUID; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.MockDatanodeDetails; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer .NodeRegistrationContainerReport; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.common.Storage; -import org.apache.hadoop.security.authentication.client.AuthenticationException; /** * Stateless helper functions for Hdds tests. @@ -74,24 +67,6 @@ private HddsTestUtils() { TestUtils.getContainerReports(containers)); } - public static StorageContainerManager getScm(OzoneConfiguration conf) - throws IOException, AuthenticationException { - conf.set(ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(ScmConfigKeys.OZONE_SCM_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - if(scmStore.getState() != Storage.StorageState.INITIALIZED) { - String clusterId = UUID.randomUUID().toString(); - String scmId = UUID.randomUUID().toString(); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - return StorageContainerManager.createSCM(conf); - } - /** * Creates list of ContainerInfo. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index 5d1ed4694680..ec80dfe4820f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -34,6 +34,7 @@ .StorageContainerDatanodeProtocolProtos.PipelineReportsProto; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; @@ -455,6 +456,22 @@ public static void quasiCloseContainer(ContainerManager containerManager, } + /** + * Construct and returns StorageContainerManager instance using the given + * configuration. + * + * @param conf OzoneConfiguration + * @return StorageContainerManager instance + * @throws IOException + * @throws AuthenticationException + */ + public static StorageContainerManager getScmSimple(OzoneConfiguration conf) + throws IOException, AuthenticationException { + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + return StorageContainerManager.createSCM(conf, configurator); + } + /** * Construct and returns StorageContainerManager instance using the given * configuration. The ports used by this StorageContainerManager are @@ -467,7 +484,9 @@ public static void quasiCloseContainer(ContainerManager containerManager, */ public static StorageContainerManager getScm(OzoneConfiguration conf) throws IOException, AuthenticationException { - return getScm(conf, new SCMConfigurator()); + SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + return getScm(conf, configurator); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index cf6f0ed735ff..4f8c1d69417f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -41,12 +41,14 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMConfigurator; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -76,8 +78,9 @@ public class TestBlockManager { private StorageContainerManager scm; private SCMContainerManager mapping; private MockNodeManager nodeManager; - private SCMPipelineManager pipelineManager; + private PipelineManagerV2Impl pipelineManager; private BlockManagerImpl blockManager; + private SCMHAManager scmHAManager; private final static long DEFAULT_BLOCK_SIZE = 128 * MB; private static HddsProtos.ReplicationFactor factor; private static HddsProtos.ReplicationType type; @@ -105,14 +108,20 @@ public void setUp() throws Exception { conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 5, TimeUnit.SECONDS); - // Override the default Node Manager in SCM with this Mock Node Manager. + // Override the default Node Manager and SCMHAManager + // in SCM with the Mock one. nodeManager = new MockNodeManager(true, 10); + scmHAManager = MockSCMHAManager.getInstance(); + eventQueue = new EventQueue(); scmMetadataStore = new SCMMetadataStoreImpl(conf); scmMetadataStore.start(conf); pipelineManager = - new SCMPipelineManager(conf, nodeManager, + PipelineManagerV2Impl.newPipelineManager( + conf, + scmHAManager, + nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); pipelineManager.allowPipelineCreation(); @@ -140,6 +149,7 @@ public void emitSafeModeStatus() { configurator.setContainerManager(containerManager); configurator.setScmSafeModeManager(safeModeManager); configurator.setMetadataStore(scmMetadataStore); + configurator.setSCMHAManager(scmHAManager); scm = TestUtils.getScm(conf, configurator); // Initialize these fields so that the tests can pass. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index b080ea1a8204..daa97266d8e8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -28,11 +28,12 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -55,7 +56,7 @@ public class TestCloseContainerEventHandler { private static OzoneConfiguration configuration; private static MockNodeManager nodeManager; - private static SCMPipelineManager pipelineManager; + private static PipelineManagerV2Impl pipelineManager; private static SCMContainerManager containerManager; private static long size; private static File testDir; @@ -77,8 +78,13 @@ public static void setUp() throws Exception { scmMetadataStore = new SCMMetadataStoreImpl(configuration); pipelineManager = - new SCMPipelineManager(configuration, nodeManager, - scmMetadataStore.getPipelineTable(), eventQueue); + PipelineManagerV2Impl.newPipelineManager( + configuration, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); + pipelineManager.allowPipelineCreation(); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java index 859eef7ca9a0..b8bae2225257 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java @@ -31,7 +31,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -107,7 +106,7 @@ private void addReplica(ContainerInfo cont, DatanodeDetails node) private ContainerInfo allocateContainer() throws IOException { - PipelineManager pipelineManager = Mockito.mock(SCMPipelineManager.class); + PipelineManager pipelineManager = Mockito.mock(PipelineManager.class); Pipeline pipeline = Pipeline.newBuilder().setState(Pipeline.PipelineState.CLOSED) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index 12c62a956993..25650762bc59 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -42,10 +42,11 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; import org.apache.hadoop.hdds.scm.XceiverClientManager; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; @@ -66,7 +67,7 @@ public class TestSCMContainerManager { private static SCMContainerManager containerManager; private static MockNodeManager nodeManager; - private static SCMPipelineManager pipelineManager; + private static PipelineManagerV2Impl pipelineManager; private static File testDir; private static XceiverClientManager xceiverClientManager; private static Random random; @@ -92,9 +93,12 @@ public static void setUp() throws Exception { } nodeManager = new MockNodeManager(true, 10); SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf); - pipelineManager = - new SCMPipelineManager(conf, nodeManager, - scmMetadataStore.getPipelineTable(), new EventQueue()); + pipelineManager = PipelineManagerV2Impl.newPipelineManager( + conf, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + new EventQueue()); pipelineManager.allowPipelineCreation(); containerManager = new SCMContainerManager(conf, scmMetadataStore.getContainerTable(), diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 797709ecc5f2..7fea0c52d736 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -36,10 +36,11 @@ import org.apache.hadoop.hdds.scm.container.SCMContainerManager; import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.OzoneConsts; @@ -115,8 +116,13 @@ SCMContainerManager createContainerManager(ConfigurationSource config, EventQueue eventQueue = new EventQueue(); PipelineManager pipelineManager = - new SCMPipelineManager(config, scmNodeManager, - scmMetadataStore.getPipelineTable(), eventQueue); + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + scmNodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); + return new SCMContainerManager(config, scmMetadataStore.getContainerTable(), scmMetadataStore.getStore(), pipelineManager); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 6a6d3284465b..bfb211dbd97c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -40,7 +40,6 @@ .StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -51,7 +50,7 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher .NodeReportFromDatanode; @@ -78,7 +77,7 @@ public class TestDeadNodeHandler { private SCMNodeManager nodeManager; private ContainerManager containerManager; private NodeReportHandler nodeReportHandler; - private SCMPipelineManager pipelineManager; + private PipelineManagerV2Impl pipelineManager; private DeadNodeHandler deadNodeHandler; private EventPublisher publisher; private EventQueue eventQueue; @@ -93,10 +92,10 @@ public void setup() throws IOException, AuthenticationException { TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir); eventQueue = new EventQueue(); - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScm(conf); nodeManager = (SCMNodeManager) scm.getScmNodeManager(); pipelineManager = - (SCMPipelineManager)scm.getPipelineManager(); + (PipelineManagerV2Impl)scm.getPipelineManager(); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, pipelineManager.getStateManager(), conf); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index b167a38b7254..d06b641f517f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; @@ -137,7 +136,7 @@ OzoneConfiguration getConf() { SCMNodeManager createNodeManager(OzoneConfiguration config) throws IOException, AuthenticationException { - scm = HddsTestUtils.getScm(config); + scm = TestUtils.getScm(config); return (SCMNodeManager) scm.getScmNodeManager(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java index a6b033923fc4..0ebab870d515 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java @@ -26,7 +26,6 @@ .StorageContainerDatanodeProtocolProtos.StorageReportProto; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.NodeReportProto; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; @@ -68,7 +67,7 @@ public void setup() throws IOException, AuthenticationException { conf.set(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, "1s"); conf.set(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, "2s"); final EventQueue eventQueue = new EventQueue(); - final StorageContainerManager scm = HddsTestUtils.getScm(conf); + final StorageContainerManager scm = TestUtils.getScm(conf); nodeManager = scm.getScmNodeManager(); final DeadNodeHandler deadNodeHandler = new DeadNodeHandler( nodeManager, Mockito.mock(PipelineManager.class), diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index e355877e269c..49cac8b9f034 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -34,7 +34,6 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider { private boolean autoOpenPipeline; - private boolean isHealthy; public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, @@ -50,14 +49,6 @@ public MockRatisPipelineProvider(NodeManager nodeManager, super(nodeManager, stateManager, conf, new EventQueue()); } - public MockRatisPipelineProvider(NodeManager nodeManager, - StateManager stateManager, - ConfigurationSource conf, - boolean isHealthy) { - super(nodeManager, stateManager, conf, new EventQueue()); - this.isHealthy = isHealthy; - } - public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { @@ -84,16 +75,18 @@ public Pipeline create(HddsProtos.ReplicationFactor factor) .setFactor(factor) .setNodes(initialPipeline.getNodes()) .build(); - if (isHealthy) { - for (DatanodeDetails datanodeDetails : initialPipeline.getNodes()) { - pipeline.reportDatanode(datanodeDetails); - } - pipeline.setLeaderId(initialPipeline.getFirstNode().getUuid()); - } return pipeline; } } + public static void markPipelineHealthy(Pipeline pipeline) + throws IOException { + for (DatanodeDetails datanodeDetails : pipeline.getNodes()) { + pipeline.reportDatanode(datanodeDetails); + } + pipeline.setLeaderId(pipeline.getFirstNode().getUuid()); + } + @Override public void shutdown() { // Do nothing. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index d5292e3153c0..e1f9104c9d15 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.MockNodeManager; -import org.apache.hadoop.hdds.scm.container.TestContainerManagerImpl; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; @@ -73,7 +72,7 @@ public class TestPipelineManagerImpl { public void init() throws Exception { conf = SCMTestUtils.getConf(); testDir = GenericTestUtils.getTestDir( - TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID()); + TestPipelineManagerImpl.class.getSimpleName() + UUID.randomUUID()); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); nodeManager = new MockNodeManager(true, 20); @@ -200,8 +199,8 @@ public void testRemovePipeline() throws Exception { pipelineManager.getPipeline(pipeline.getId()); fail("Pipeline should not have been retrieved"); } catch (PipelineNotFoundException e) { - // There should be no pipeline in pipelineManager. - Assert.assertEquals(0, pipelineManager.getPipelines().size()); + // There may be pipelines created by BackgroundPipelineCreator + // exist in pipelineManager, just ignore them. } pipelineManager.close(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 7a40e3e47c9e..f35318d9c3bc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -31,12 +31,13 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.test.GenericTestUtils; @@ -70,8 +71,13 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config); try { - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, pipelineManager.getStateManager(), config); @@ -114,13 +120,18 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config); try { - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); pipelineManager.allowPipelineCreation(); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config, true); + pipelineManager.getStateManager(), config); pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); @@ -135,6 +146,16 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + // Mark pipeline healthy + pipeline1 = pipelineManager.getPipeline(pipeline1.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline1); + + pipeline2 = pipelineManager.getPipeline(pipeline2.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline2); + + pipeline3 = pipelineManager.getPipeline(pipeline3.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline3); + SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( config, containers, pipelineManager, eventQueue); @@ -190,13 +211,18 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(config); try { - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); pipelineManager.allowPipelineCreation(); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config, true); + pipelineManager.getStateManager(), config); pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); @@ -211,6 +237,15 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + // Mark pipeline healthy + pipeline1 = pipelineManager.getPipeline(pipeline1.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline1); + + pipeline2 = pipelineManager.getPipeline(pipeline2.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline2); + + pipeline3 = pipelineManager.getPipeline(pipeline3.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( config, containers, pipelineManager, eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index 4e1cf6fcb2d3..5aa67a39007a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -27,12 +27,13 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.test.GenericTestUtils; @@ -50,7 +51,7 @@ public class TestOneReplicaPipelineSafeModeRule { @Rule public TemporaryFolder folder = new TemporaryFolder(); private OneReplicaPipelineSafeModeRule rule; - private SCMPipelineManager pipelineManager; + private PipelineManagerV2Impl pipelineManager; private EventQueue eventQueue; private void setup(int nodes, int pipelineFactorThreeCount, @@ -72,10 +73,12 @@ private void setup(int nodes, int pipelineFactorThreeCount, SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(ozoneConfiguration); - pipelineManager = - new SCMPipelineManager(ozoneConfiguration, mockNodeManager, - scmMetadataStore.getPipelineTable(), - eventQueue); + pipelineManager = PipelineManagerV2Impl.newPipelineManager( + ozoneConfiguration, + MockSCMHAManager.getInstance(), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + eventQueue); pipelineManager.allowPipelineCreation(); PipelineProvider mockRatisProvider = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 418d945f6383..935dc7761022 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -35,13 +35,14 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore; import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreImpl; import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider; -import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager; +import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; @@ -295,8 +296,13 @@ public void testFailWithIncorrectValueForHealthyPipelinePercent() OzoneConfiguration conf = createConf(100, 0.9); MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, scmMetadataStore.getPipelineTable(), queue); + PipelineManager pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + conf, + MockSCMHAManager.getInstance(), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + queue); scmSafeModeManager = new SCMSafeModeManager( conf, containers, pipelineManager, queue); fail("testFailWithIncorrectValueForHealthyPipelinePercent"); @@ -313,8 +319,13 @@ public void testFailWithIncorrectValueForOneReplicaPipelinePercent() OzoneConfiguration conf = createConf(0.9, 200); MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, scmMetadataStore.getPipelineTable(), queue); + PipelineManager pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + conf, + MockSCMHAManager.getInstance(), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + queue); scmSafeModeManager = new SCMSafeModeManager( conf, containers, pipelineManager, queue); fail("testFailWithIncorrectValueForOneReplicaPipelinePercent"); @@ -330,8 +341,13 @@ public void testFailWithIncorrectValueForSafeModePercent() throws Exception { OzoneConfiguration conf = createConf(0.9, 0.1); conf.setDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, -1.0); MockNodeManager mockNodeManager = new MockNodeManager(true, 10); - PipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, scmMetadataStore.getPipelineTable(), queue); + PipelineManager pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + conf, + MockSCMHAManager.getInstance(), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + queue); scmSafeModeManager = new SCMSafeModeManager( conf, containers, pipelineManager, queue); fail("testFailWithIncorrectValueForSafeModePercent"); @@ -354,18 +370,29 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( containers.addAll(HddsTestUtils.getContainerInfo(containerCount)); MockNodeManager mockNodeManager = new MockNodeManager(true, nodeCount); - SCMPipelineManager pipelineManager = new SCMPipelineManager(conf, - mockNodeManager, scmMetadataStore.getPipelineTable(), queue); + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + conf, + MockSCMHAManager.getInstance(), + mockNodeManager, + scmMetadataStore.getPipelineTable(), + queue); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(mockNodeManager, - pipelineManager.getStateManager(), config, true); + pipelineManager.getStateManager(), config); pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); pipelineManager.allowPipelineCreation(); - for (int i=0; i < pipelineCount; i++) { - pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + for (int i = 0; i < pipelineCount; i++) { + // Create pipeline + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + + // Mark pipeline healthy + pipeline = pipelineManager.getPipeline(pipeline.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline); } for (ContainerInfo container : containers) { @@ -449,7 +476,7 @@ private void checkOpen(int expectedCount) throws Exception { 1000, 5000); } - private void firePipelineEvent(SCMPipelineManager pipelineManager, + private void firePipelineEvent(PipelineManager pipelineManager, Pipeline pipeline) throws Exception { pipelineManager.openPipeline(pipeline.getId()); queue.fireEvent(SCMEvents.OPEN_PIPELINE, @@ -571,12 +598,17 @@ public void testSafeModePipelineExitRule() throws Exception { config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, scmMetadataStore.getPipelineTable(), queue); + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + queue); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config, true); + pipelineManager.getStateManager(), config); pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); pipelineManager.allowPipelineCreation(); @@ -585,6 +617,9 @@ public void testSafeModePipelineExitRule() throws Exception { HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + pipeline = pipelineManager.getPipeline(pipeline.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline); + scmSafeModeManager = new SCMSafeModeManager( config, containers, pipelineManager, queue); @@ -625,13 +660,17 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() config.setBoolean( HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK, true); - SCMPipelineManager pipelineManager = new SCMPipelineManager(config, - nodeManager, scmMetadataStore.getPipelineTable(), queue); - + PipelineManagerV2Impl pipelineManager = + PipelineManagerV2Impl.newPipelineManager( + config, + MockSCMHAManager.getInstance(), + nodeManager, + scmMetadataStore.getPipelineTable(), + queue); PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager, - pipelineManager.getStateManager(), config, true); + pipelineManager.getStateManager(), config); pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); @@ -668,6 +707,11 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() Pipeline pipeline = pipelineManager.createPipeline( HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + + // Mark pipeline healthy + pipeline = pipelineManager.getPipeline(pipeline.getId()); + MockRatisPipelineProvider.markPipelineHealthy(pipeline); + firePipelineEvent(pipelineManager, pipeline); queue.processAll(5000); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index 349e705956df..f4553abd736c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.scm.TestUtils; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocolServerSideTranslatorPB; @@ -58,6 +59,7 @@ public void setUp() throws Exception { File dir = GenericTestUtils.getRandomizedTestDir(); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); SCMConfigurator configurator = new SCMConfigurator(); + configurator.setSCMHAManager(MockSCMHAManager.getInstance()); scm = TestUtils.getScm(config, configurator); scm.start(); scm.exitSafeMode(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java index 40799655b44e..8a233aeca13e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java @@ -20,8 +20,8 @@ import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -72,7 +72,7 @@ public void init() throws Exception { // Standalone SCM Ratis server initSCM(); - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScm(conf); scm.start(); scmRatisServer = scm.getScmRatisServer(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java index 69bc5bd93b68..0eddbdec0441 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java @@ -17,8 +17,8 @@ package org.apache.hadoop.hdds.scm.server.ratis; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.junit.After; @@ -56,7 +56,7 @@ public void init() throws Exception { scmId = UUID.randomUUID().toString(); initSCM(); - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScm(conf); scm.start(); scmRatisServer = scm.getScmRatisServer(); scmStateMachine = scm.getScmRatisServer().getScmStateMachine(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 5baa65b43c37..649c4a93e47d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; @@ -302,7 +303,7 @@ public void restartStorageContainerManager(boolean waitForDatanode) AuthenticationException { scm.stop(); scm.join(); - scm = StorageContainerManager.createSCM(conf); + scm = TestUtils.getScmSimple(conf); scm.start(); if (waitForDatanode) { waitForClusterToBeReady(); @@ -628,7 +629,7 @@ protected StorageContainerManager createSCM() configureSCM(); SCMStorageConfig scmStore = new SCMStorageConfig(conf); initializeScmStorage(scmStore); - StorageContainerManager scm = StorageContainerManager.createSCM(conf); + StorageContainerManager scm = TestUtils.getScmSimple(conf); HealthyPipelineSafeModeRule rule = scm.getScmSafeModeManager().getHealthyPipelineSafeModeRule(); if (rule != null) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 291f19f88e49..a9fa1e52bdc9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol; -import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmInfo; +import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.server.SCMHTTPServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; @@ -267,7 +267,7 @@ private void setSecureConfig() throws IOException { public void testSecureScmStartupSuccess() throws Exception { initSCM(); - scm = StorageContainerManager.createSCM(conf); + scm = TestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); assertEquals(clusterId, scmInfo.getClusterId()); @@ -278,7 +278,7 @@ public void testSecureScmStartupSuccess() throws Exception { public void testSCMSecurityProtocol() throws Exception { initSCM(); - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance try { scm.start(); @@ -338,7 +338,7 @@ public void testSecureScmStartupFailure() throws Exception { LambdaTestUtils.intercept(IOException.class, "Running in secure mode, but config doesn't have a keytab", - () -> StorageContainerManager.createSCM(conf)); + () -> TestUtils.getScmSimple(conf)); conf.set(HDDS_SCM_KERBEROS_PRINCIPAL_KEY, "scm/_HOST@EXAMPLE.com"); @@ -346,7 +346,7 @@ public void testSecureScmStartupFailure() throws Exception { "/etc/security/keytabs/scm.keytab"); testCommonKerberosFailures( - () -> StorageContainerManager.createSCM(conf)); + () -> TestUtils.getScmSimple(conf)); } @@ -375,7 +375,7 @@ private void testCommonKerberosFailures(Callable test) throws Exception { public void testSecureOMInitializationFailure() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(conf); + scm = TestUtils.getScmSimple(conf); setupOm(conf); conf.set(OZONE_OM_KERBEROS_PRINCIPAL_KEY, "non-existent-user@EXAMPLE.com"); @@ -389,7 +389,7 @@ public void testSecureOMInitializationFailure() throws Exception { public void testSecureOmInitializationSuccess() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(conf); + scm = TestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); @@ -407,7 +407,7 @@ public void testSecureOmInitializationSuccess() throws Exception { public void testAccessControlExceptionOnClient() throws Exception { initSCM(); // Create a secure SCM instance as om client will connect to it - scm = StorageContainerManager.createSCM(conf); + scm = TestUtils.getScmSimple(conf); LogCapturer logs = LogCapturer.captureLogs(OzoneManager.getLogger()); GenericTestUtils.setLogLevel(OzoneManager.getLogger(), INFO); setupOm(conf); @@ -632,7 +632,7 @@ public void testSecureOmReInit() throws Exception { initSCM(); try { - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScmSimple(conf); scm.start(); conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, false); OMStorage omStore = new OMStorage(conf); @@ -678,7 +678,7 @@ public void testSecureOmInitSuccess() throws Exception { omLogs.clearOutput(); initSCM(); try { - scm = HddsTestUtils.getScm(conf); + scm = TestUtils.getScmSimple(conf); scm.start(); OMStorage omStore = new OMStorage(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 1320b5b9cd89..597a317433cd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -26,6 +26,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.junit.Assert.fail; + +import org.apache.hadoop.hdds.scm.TestUtils; import org.junit.Ignore; import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; @@ -490,7 +492,7 @@ public void testSCMInitializationFailure() exception.expect(SCMException.class); exception.expectMessage( "SCM not initialized due to storage config failure"); - StorageContainerManager.createSCM(conf); + TestUtils.getScmSimple(conf); } @Test @@ -508,7 +510,7 @@ public void testScmInfo() throws Exception { scmStore.setScmId(scmId); // writes the version file properties scmStore.initialize(); - StorageContainerManager scm = StorageContainerManager.createSCM(conf); + StorageContainerManager scm = TestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); Assert.assertEquals(clusterId, scmInfo.getClusterId()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 7a1cb5b278dd..b122d7d546b5 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -48,6 +48,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes; +import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl; import org.apache.hadoop.hdds.scm.net.NodeSchema; @@ -170,6 +171,7 @@ public static void setUp() throws Exception { SCMConfigurator configurator = new SCMConfigurator(); configurator.setScmNodeManager(nodeManager); configurator.setNetworkTopology(clusterMap); + configurator.setSCMHAManager(MockSCMHAManager.getInstance()); scm = TestUtils.getScm(conf, configurator); scm.start(); scm.exitSafeMode(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index 646b91571260..2da50b7771be 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -75,7 +75,7 @@ public void testAllOMNodesRunning() throws Exception { @Test public void testOneOMNodeDown() throws Exception { getCluster().stopOzoneManager(1); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); + Thread.sleep(NODE_FAILURE_TIMEOUT * 4); createVolumeTest(true); @@ -90,7 +90,7 @@ public void testOneOMNodeDown() throws Exception { public void testTwoOMNodesDown() throws Exception { getCluster().stopOzoneManager(1); getCluster().stopOzoneManager(2); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); + Thread.sleep(NODE_FAILURE_TIMEOUT * 4); createVolumeTest(false); @@ -308,7 +308,7 @@ public void testMultipartUploadWithOneOmNodeDown() throws Exception { // Stop one of the ozone manager, to see when the OM leader changes // multipart upload is happening successfully or not. getCluster().stopOzoneManager(leaderOMNodeId); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); + Thread.sleep(NODE_FAILURE_TIMEOUT * 4); createMultipartKeyAndReadKey(ozoneBucket, keyName, uploadID); @@ -340,7 +340,7 @@ public void testIncrementalWaitTimeWithSameNodeFailover() throws Exception { String leaderOMNodeId = omFailoverProxyProvider.getCurrentProxyOMNodeId(); getCluster().stopOzoneManager(leaderOMNodeId); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); + Thread.sleep(NODE_FAILURE_TIMEOUT * 4); createKeyTest(true); // failover should happen to new node long numTimesTriedToSameNode = omFailoverProxyProvider.getWaitTime() @@ -641,7 +641,7 @@ public void testListParts() throws Exception { // Stop leader OM, and then validate list parts. stopLeaderOM(); - Thread.sleep(NODE_FAILURE_TIMEOUT * 2); + Thread.sleep(NODE_FAILURE_TIMEOUT * 4); validateListParts(ozoneBucket, keyName, uploadID, partsMap); From 58394ebdd22db72b8f51a7ed700ad0c54eff4e3d Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Sat, 24 Oct 2020 20:55:36 +0530 Subject: [PATCH 16/51] HDDS-3837. Add isLeader check in SCMHAManager. --- .../hdds/scm/block/BlockManagerImpl.java | 5 +- .../container/CloseContainerEventHandler.java | 4 +- .../hadoop/hdds/scm/ha/SCMHAManager.java | 13 ++ .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 85 +++++++++- .../apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 6 +- .../hadoop/hdds/scm/ha/SCMRatisServer.java | 10 ++ .../hdds/scm/ha/SCMRatisServerImpl.java | 15 ++ .../hadoop/hdds/scm/node/NewNodeHandler.java | 12 +- .../node/NonHealthyToHealthyNodeHandler.java | 12 +- .../pipeline/BackgroundPipelineCreator.java | 2 +- .../hdds/scm/pipeline/PipelineManager.java | 5 +- .../scm/pipeline/PipelineManagerMXBean.java | 3 +- .../scm/pipeline/PipelineManagerV2Impl.java | 65 ++++++-- .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 53 +++++- .../pipeline/TestPipelineActionHandler.java | 3 +- .../scm/pipeline/TestPipelineManagerImpl.java | 156 +++++++++++++++--- .../scm/safemode/TestSCMSafeModeManager.java | 2 +- 17 files changed, 401 insertions(+), 50 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index b5b2aafa9ed4..ec0094b024e0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -57,6 +57,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; + +import org.apache.ratis.protocol.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -256,7 +258,8 @@ public AllocatedBlock allocateBlock(final long size, ReplicationType type, * @param containerInfo - Container Info. * @return AllocatedBlock */ - private AllocatedBlock newBlock(ContainerInfo containerInfo) { + private AllocatedBlock newBlock(ContainerInfo containerInfo) + throws NotLeaderException { try { final Pipeline pipeline = pipelineManager .getPipeline(containerInfo.getPipelineID()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java index fd73711003bf..a2b79fb027c0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; +import org.apache.ratis.protocol.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -98,7 +99,7 @@ public void onMessage(ContainerID containerID, EventPublisher publisher) { * @throws ContainerNotFoundException */ private List getNodes(final ContainerInfo container) - throws ContainerNotFoundException { + throws ContainerNotFoundException, NotLeaderException { try { return pipelineManager.getPipeline(container.getPipelineID()).getNodes(); } catch (PipelineNotFoundException ex) { @@ -109,5 +110,4 @@ private List getNodes(final ContainerInfo container) .collect(Collectors.toList()); } } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index eb6c8006c5e3..ade0ad965768 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -17,6 +17,9 @@ package org.apache.hadoop.hdds.scm.ha; +import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.RaftPeer; + import java.io.IOException; /** @@ -39,8 +42,18 @@ public interface SCMHAManager { */ SCMRatisServer getRatisServer(); + /** + * Returns suggested leader from RaftServer. + */ + RaftPeer getSuggestedLeader(); + /** * Stops the HA service. */ void shutdown() throws IOException; + + /** + * Returns NotLeaderException with useful info. + */ + NotLeaderException triggerNotLeaderException(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 89ac714993c7..8bb94578b3b1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -17,7 +17,17 @@ package org.apache.hadoop.hdds.scm.ha; +import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.server.RaftServer; +import org.apache.ratis.server.impl.RaftServerImpl; +import org.apache.ratis.server.impl.RaftServerProxy; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.IOException; @@ -31,14 +41,17 @@ */ public class SCMHAManagerImpl implements SCMHAManager { - private static boolean isLeader = true; + private static final Logger LOG = + LoggerFactory.getLogger(SCMHAManagerImpl.class); private final SCMRatisServerImpl ratisServer; + private final ConfigurationSource conf; /** * Creates SCMHAManager instance. */ public SCMHAManagerImpl(final ConfigurationSource conf) throws IOException { + this.conf = conf; this.ratisServer = new SCMRatisServerImpl( conf.getObject(SCMHAConfiguration.class), conf); } @@ -56,7 +69,28 @@ public void start() throws IOException { */ @Override public boolean isLeader() { - return isLeader; + if (!SCMHAUtils.isSCMHAEnabled(conf)) { + // When SCM HA is not enabled, the current SCM is always the leader. + return true; + } + RaftServer server = ratisServer.getServer(); + Preconditions.checkState(server instanceof RaftServerProxy); + RaftServerImpl serverImpl = null; + try { + // SCM only has one raft group. + serverImpl = ((RaftServerProxy) server) + .getImpl(ratisServer.getRaftGroupId()); + if (serverImpl != null) { + // Only when it's sure the current SCM is the leader, otherwise + // it should all return false. + return serverImpl.isLeader(); + } + } catch (IOException ioe) { + LOG.error("Fail to get RaftServer impl and therefore it's not clear " + + "whether it's leader. ", ioe); + } + + return false; } /** @@ -67,6 +101,42 @@ public SCMRatisServer getRatisServer() { return ratisServer; } + private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) { + if (serverImpl.isLeader()) { + return RaftPeerId.getRaftPeerId( + serverImpl.getRoleInfoProto().getLeaderInfo().toString()); + } else if (serverImpl.isFollower()) { + return RaftPeerId.valueOf( + serverImpl.getRoleInfoProto().getFollowerInfo() + .getLeaderInfo().getId().getId()); + } else { + return null; + } + } + + @Override + public RaftPeer getSuggestedLeader() { + RaftServer server = ratisServer.getServer(); + Preconditions.checkState(server instanceof RaftServerProxy); + RaftServerImpl serverImpl = null; + try { + // SCM only has one raft group. + serverImpl = ((RaftServerProxy) server) + .getImpl(ratisServer.getRaftGroupId()); + if (serverImpl != null) { + RaftPeerId peerId = getPeerIdFromRoleInfo(serverImpl); + if (peerId != null) { + return new RaftPeer(peerId); + } + return null; + } + } catch (IOException ioe) { + LOG.error("Fail to get RaftServer impl and therefore it's not clear " + + "whether it's leader. ", ioe); + } + return null; + } + /** * {@inheritDoc} */ @@ -75,4 +145,15 @@ public void shutdown() throws IOException { ratisServer.stop(); } + /** + * {@inheritDoc} + */ + @Override + public NotLeaderException triggerNotLeaderException() { + return new NotLeaderException(RaftGroupMemberId.valueOf( + ratisServer.getServer().getId(), + ratisServer.getRaftGroupId()), + getSuggestedLeader(), + ratisServer.getRaftPeers()); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index eb22566f51d2..0f71744d64de 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -20,7 +20,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmUtils; import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; @@ -37,12 +37,12 @@ private SCMHAUtils() { } // Check if SCM HA is enabled. - public static boolean isSCMHAEnabled(OzoneConfiguration conf) { + public static boolean isSCMHAEnabled(ConfigurationSource conf) { return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); } - public static File createSCMRatisDir(OzoneConfiguration conf) + public static File createSCMRatisDir(ConfigurationSource conf) throws IllegalArgumentException { String scmRatisDir = SCMRatisServer.getSCMRatisDirectory(conf); if (scmRatisDir == null || scmRatisDir.isEmpty()) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java index 4ddbc7b63ac9..2f997767cfa0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -18,8 +18,12 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.ratis.protocol.RaftGroupId; +import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.server.RaftServer; import java.io.IOException; +import java.util.List; import java.util.concurrent.ExecutionException; /** @@ -35,4 +39,10 @@ SCMRatisResponse submitRequest(SCMRatisRequest request) throws IOException, ExecutionException, InterruptedException; void stop() throws IOException; + + RaftServer getServer(); + + RaftGroupId getRaftGroupId(); + + List getRaftPeers(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index 45ae212ebb66..33ae109ef825 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -21,6 +21,7 @@ import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -110,4 +111,18 @@ public void stop() throws IOException { server.close(); } + @Override + public RaftServer getServer() { + return server; + } + + @Override + public RaftGroupId getRaftGroupId() { + return raftGroupId; + } + + @Override + public List getRaftPeers() { + return Collections.singletonList(new RaftPeer(raftPeerId)); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java index a40a63a1dc7e..42cada998262 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java @@ -23,11 +23,16 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.ratis.protocol.NotLeaderException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Handles New Node event. */ public class NewNodeHandler implements EventHandler { + private static final Logger LOG = + LoggerFactory.getLogger(NewNodeHandler.class); private final PipelineManager pipelineManager; private final ConfigurationSource conf; @@ -41,6 +46,11 @@ public NewNodeHandler(PipelineManager pipelineManager, @Override public void onMessage(DatanodeDetails datanodeDetails, EventPublisher publisher) { - pipelineManager.triggerPipelineCreation(); + try { + pipelineManager.triggerPipelineCreation(); + } catch (NotLeaderException ex) { + LOG.debug("Not the current leader SCM and cannot start pipeline" + + " creation."); + } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java index cc32f8452c74..e73231be628c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java @@ -23,12 +23,17 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; +import org.apache.ratis.protocol.NotLeaderException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Handles Stale node event. */ public class NonHealthyToHealthyNodeHandler implements EventHandler { + private static final Logger LOG = + LoggerFactory.getLogger(NonHealthyToHealthyNodeHandler.class); private final PipelineManager pipelineManager; private final ConfigurationSource conf; @@ -42,6 +47,11 @@ public NonHealthyToHealthyNodeHandler( @Override public void onMessage(DatanodeDetails datanodeDetails, EventPublisher publisher) { - pipelineManager.triggerPipelineCreation(); + try { + pipelineManager.triggerPipelineCreation(); + } catch (NotLeaderException ex) { + LOG.debug("Not the current leader SCM and cannot start pipeline" + + " creation."); + } } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java index f240293b8ae6..42b3a939522e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java @@ -103,7 +103,7 @@ private boolean skipCreation(HddsProtos.ReplicationFactor factor, } } - private void createPipelines() { + private void createPipelines() throws RuntimeException { // TODO: #CLUTIL Different replication factor may need to be supported HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf( conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index 9c997a8ab93b..ddd461b6d83e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.server.events.EventHandler; +import org.apache.ratis.protocol.NotLeaderException; /** * Interface which exposes the api for pipeline management. @@ -55,7 +56,7 @@ List getPipelines(ReplicationType type, ReplicationFactor factor); List getPipelines(ReplicationType type, - Pipeline.PipelineState state); + Pipeline.PipelineState state) throws NotLeaderException; List getPipelines(ReplicationType type, ReplicationFactor factor, Pipeline.PipelineState state); @@ -84,7 +85,7 @@ void scrubPipeline(ReplicationType type, ReplicationFactor factor) void startPipelineCreator(); - void triggerPipelineCreation(); + void triggerPipelineCreation() throws NotLeaderException; void incNumBlocksAllocatedMetric(PipelineID id); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java index 6d7d71730891..55e096b2a57e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.ratis.protocol.NotLeaderException; import java.util.Map; @@ -33,6 +34,6 @@ public interface PipelineManagerMXBean { * Returns the number of pipelines in different state. * @return state to number of pipeline map */ - Map getPipelineInfo(); + Map getPipelineInfo() throws NotLeaderException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 12417457e133..069540ce261e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.Time; +import org.apache.ratis.protocol.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -75,19 +76,21 @@ public final class PipelineManagerV2Impl implements PipelineManager { private final SCMPipelineMetrics metrics; private long pipelineWaitDefaultTimeout; private final AtomicBoolean isInSafeMode; + private SCMHAManager scmhaManager; // Used to track if the safemode pre-checks have completed. This is designed // to prevent pipelines being created until sufficient nodes have registered. private final AtomicBoolean pipelineCreationAllowed; private PipelineManagerV2Impl(ConfigurationSource conf, - NodeManager nodeManager, - StateManager pipelineStateManager, - PipelineFactory pipelineFactory, + SCMHAManager scmhaManager, + StateManager pipelineStateManager, + PipelineFactory pipelineFactory, EventPublisher eventPublisher) { this.lock = new ReentrantReadWriteLock(); this.pipelineFactory = pipelineFactory; this.stateManager = pipelineStateManager; this.conf = conf; + this.scmhaManager = scmhaManager; this.eventPublisher = eventPublisher; this.pmInfoBean = MBeans.register("SCMPipelineManager", "SCMPipelineManagerInfo", this); @@ -120,7 +123,7 @@ public static PipelineManagerV2Impl newPipelineManager( nodeManager, stateManager, conf, eventPublisher); // Create PipelineManager PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf, - nodeManager, stateManager, pipelineFactory, eventPublisher); + scmhaManager, stateManager, pipelineFactory, eventPublisher); // Create background thread. Scheduler scheduler = new Scheduler( @@ -136,6 +139,7 @@ public static PipelineManagerV2Impl newPipelineManager( @Override public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor) throws IOException { + checkLeader(); if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) { LOG.debug("Pipeline creation is not allowed until safe mode prechecks " + "complete"); @@ -266,6 +270,7 @@ public List getPipelines( @Override public void addContainerToPipeline( PipelineID pipelineID, ContainerID containerID) throws IOException { + checkLeader(); lock.writeLock().lock(); try { stateManager.addContainerToPipeline(pipelineID, containerID); @@ -277,6 +282,7 @@ public void addContainerToPipeline( @Override public void removeContainerFromPipeline( PipelineID pipelineID, ContainerID containerID) throws IOException { + checkLeader(); lock.writeLock().lock(); try { stateManager.removeContainerFromPipeline(pipelineID, containerID); @@ -288,6 +294,7 @@ public void removeContainerFromPipeline( @Override public NavigableSet getContainersInPipeline( PipelineID pipelineID) throws IOException { + checkLeader(); lock.readLock().lock(); try { return stateManager.getContainers(pipelineID); @@ -298,11 +305,13 @@ public NavigableSet getContainersInPipeline( @Override public int getNumberOfContainers(PipelineID pipelineID) throws IOException { + checkLeader(); return stateManager.getNumberOfContainers(pipelineID); } @Override public void openPipeline(PipelineID pipelineId) throws IOException { + checkLeader(); lock.writeLock().lock(); try { Pipeline pipeline = stateManager.getPipeline(pipelineId); @@ -328,6 +337,7 @@ public void openPipeline(PipelineID pipelineId) throws IOException { * @throws IOException */ protected void removePipeline(Pipeline pipeline) throws IOException { + checkLeader(); pipelineFactory.close(pipeline.getType(), pipeline); PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); @@ -349,6 +359,7 @@ protected void removePipeline(Pipeline pipeline) throws IOException { */ protected void closeContainersForPipeline(final PipelineID pipelineId) throws IOException { + checkLeader(); Set containerIDs = stateManager.getContainers(pipelineId); for (ContainerID containerID : containerIDs) { eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); @@ -364,6 +375,7 @@ protected void closeContainersForPipeline(final PipelineID pipelineId) @Override public void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException { + checkLeader(); PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); try { @@ -393,6 +405,7 @@ public void closePipeline(Pipeline pipeline, boolean onTimeout) @Override public void scrubPipeline(ReplicationType type, ReplicationFactor factor) throws IOException { + checkLeader(); if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) { // Only srub pipeline for RATIS THREE pipeline return; @@ -439,7 +452,9 @@ public void startPipelineCreator() { * Triggers pipeline creation after the specified time. */ @Override - public void triggerPipelineCreation() { + public void triggerPipelineCreation() throws NotLeaderException { + // TODO add checkLeader once follower validates safemode + // before it becomes leader. backgroundPipelineCreator.triggerPipelineCreation(); } @@ -457,6 +472,7 @@ public void incNumBlocksAllocatedMetric(PipelineID id) { @Override public void activatePipeline(PipelineID pipelineID) throws IOException { + checkLeader(); stateManager.updatePipelineState(pipelineID.getProtobuf(), HddsProtos.PipelineState.PIPELINE_OPEN); } @@ -470,6 +486,7 @@ public void activatePipeline(PipelineID pipelineID) @Override public void deactivatePipeline(PipelineID pipelineID) throws IOException { + checkLeader(); stateManager.updatePipelineState(pipelineID.getProtobuf(), HddsProtos.PipelineState.PIPELINE_DORMANT); } @@ -484,6 +501,7 @@ public void deactivatePipeline(PipelineID pipelineID) @Override public void waitPipelineReady(PipelineID pipelineID, long timeout) throws IOException { + checkLeader(); long st = Time.monotonicNow(); if (timeout == 0) { timeout = pipelineWaitDefaultTimeout; @@ -515,7 +533,8 @@ public void waitPipelineReady(PipelineID pipelineID, long timeout) } @Override - public Map getPipelineInfo() { + public Map getPipelineInfo() throws NotLeaderException { + checkLeader(); final Map pipelineInfo = new HashMap<>(); for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) { pipelineInfo.put(state.toString(), 0); @@ -564,13 +583,21 @@ public void onMessage(SCMSafeModeManager.SafeModeStatus status, // Trigger pipeline creation only if the preCheck status has changed to // complete. - if (isPipelineCreationAllowed() && !currentAllowPipelines) { - triggerPipelineCreation(); - } - // Start the pipeline creation thread only when safemode switches off - if (!getSafeModeStatus() && currentlyInSafeMode) { - startPipelineCreator(); + + try { + if (isPipelineCreationAllowed() && !currentAllowPipelines) { + triggerPipelineCreation(); + } + // Start the pipeline creation thread only when safemode switches off + if (!getSafeModeStatus() && currentlyInSafeMode) { + startPipelineCreator(); + } + } catch (NotLeaderException ex) { + LOG.warn("Not the current leader SCM and cannot process pipeline" + + " creation. Suggested leader is: ", + scmhaManager.getSuggestedLeader().getAddress()); } + } @VisibleForTesting @@ -593,6 +620,20 @@ public void setPipelineProvider(ReplicationType replicationType, public StateManager getStateManager() { return stateManager; } + + public void setScmhaManager(SCMHAManager scmhaManager) { + this.scmhaManager = scmhaManager; + } + + /** + * Check if scm is current leader. + * @throws NotLeaderException when it's not the current leader. + */ + private void checkLeader() throws NotLeaderException { + if (!scmhaManager.isLeader()) { + throw scmhaManager.triggerNotLeaderException(); + } + } private void setBackgroundPipelineCreator( BackgroundPipelineCreator backgroundPipelineCreator) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index c3b14fb405bb..ce48c1136a9f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -28,11 +28,14 @@ import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; +import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.StateMachineException; +import org.apache.ratis.server.RaftServer; /** * Mock SCMHAManager implementation for testing. @@ -40,16 +43,30 @@ public final class MockSCMHAManager implements SCMHAManager { private final SCMRatisServer ratisServer; + private boolean isLeader; public static SCMHAManager getInstance() { return new MockSCMHAManager(); } + public static SCMHAManager getLeaderInstance() { + MockSCMHAManager mockSCMHAManager = new MockSCMHAManager(); + mockSCMHAManager.setIsLeader(true); + return mockSCMHAManager; + } + + public static SCMHAManager getFollowerInstance() { + MockSCMHAManager mockSCMHAManager = new MockSCMHAManager(); + mockSCMHAManager.setIsLeader(false); + return mockSCMHAManager; + } + /** * Creates MockSCMHAManager instance. */ private MockSCMHAManager() { this.ratisServer = new MockRatisServer(); + this.isLeader = true; } @Override @@ -62,7 +79,16 @@ public void start() throws IOException { */ @Override public boolean isLeader() { - return true; + return isLeader; + } + + public void setIsLeader(boolean isLeader) { + this.isLeader = isLeader; + } + + @Override + public RaftPeer getSuggestedLeader() { + throw new UnsupportedOperationException(); } /** @@ -81,6 +107,16 @@ public void shutdown() throws IOException { ratisServer.stop(); } + /** + * {@inheritDoc} + */ + @Override + public NotLeaderException triggerNotLeaderException() { + return new NotLeaderException(RaftGroupMemberId.valueOf( + RaftPeerId.valueOf("peer"), RaftGroupId.randomId()), + null, new ArrayList<>()); + } + private static class MockRatisServer implements SCMRatisServer { private Map handlers = @@ -140,6 +176,21 @@ private Message process(final SCMRatisRequest request) } } + @Override + public RaftServer getServer() { + return null; + } + + @Override + public RaftGroupId getRaftGroupId() { + return null; + } + + @Override + public List getRaftPeers() { + return new ArrayList<>(); + } + @Override public void stop() { } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java index 99443c3b7eac..e40c8bace5d9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; +import org.apache.ratis.protocol.NotLeaderException; import org.junit.Test; import org.mockito.Mockito; @@ -37,7 +38,7 @@ public class TestPipelineActionHandler { @Test public void testCloseActionForMissingPipeline() - throws PipelineNotFoundException { + throws PipelineNotFoundException, NotLeaderException { final PipelineManager manager = Mockito.mock(PipelineManager.class); final EventQueue queue = Mockito.mock(EventQueue.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index e1f9104c9d15..a8f03bb6ad68 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; @@ -37,6 +38,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.ratis.protocol.NotLeaderException; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -66,7 +68,6 @@ public class TestPipelineManagerImpl { private DBStore dbStore; private static MockNodeManager nodeManager; private static int maxPipelineCount; - private static EventQueue eventQueue; @Before public void init() throws Exception { @@ -76,7 +77,6 @@ public void init() throws Exception { conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition()); nodeManager = new MockNodeManager(true, 20); - eventQueue = new EventQueue(); maxPipelineCount = nodeManager.getNodeCount(HddsProtos.NodeState.HEALTHY) * conf.getInt(OZONE_DATANODE_PIPELINE_LIMIT, OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) / @@ -91,17 +91,23 @@ public void cleanup() throws Exception { FileUtil.fullyDelete(testDir); } - private PipelineManagerV2Impl createPipelineManager() + private PipelineManagerV2Impl createPipelineManager(boolean leader) throws IOException { - return PipelineManagerV2Impl.newPipelineManager( - conf, MockSCMHAManager.getInstance(), - nodeManager, - SCMDBDefinition.PIPELINES.getTable(dbStore), eventQueue); + SCMHAManager scmhaManager; + if (leader) { + scmhaManager = MockSCMHAManager.getLeaderInstance(); + } else { + scmhaManager = MockSCMHAManager.getFollowerInstance(); + } + return PipelineManagerV2Impl.newPipelineManager(conf, scmhaManager, + new MockNodeManager(true, 20), + SCMDBDefinition.PIPELINES.getTable(dbStore), + new EventQueue()); } @Test public void testCreatePipeline() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); pipelineManager.allowPipelineCreation(); Pipeline pipeline1 = pipelineManager.createPipeline( @@ -115,7 +121,7 @@ public void testCreatePipeline() throws Exception { Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId())); pipelineManager.close(); - PipelineManagerV2Impl pipelineManager2 = createPipelineManager(); + PipelineManagerV2Impl pipelineManager2 = createPipelineManager(true); // Should be able to load previous pipelines. Assert.assertFalse(pipelineManager.getPipelines().isEmpty()); Assert.assertEquals(2, pipelineManager.getPipelines().size()); @@ -128,9 +134,25 @@ public void testCreatePipeline() throws Exception { pipelineManager2.close(); } + @Test + public void testCreatePipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(false); + Assert.assertTrue(pipelineManager.getPipelines().isEmpty()); + pipelineManager.allowPipelineCreation(); + try { + pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); + } + @Test public void testUpdatePipelineStates() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); Pipeline pipeline = pipelineManager.createPipeline( HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); @@ -163,9 +185,72 @@ public void testUpdatePipelineStates() throws Exception { pipelineManager.close(); } + @Test + public void testOpenPipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + // Change to follower + pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + try { + pipelineManager.openPipeline(pipeline.getId()); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); + } + + @Test + public void testActivatePipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + // Change to follower + pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + try { + pipelineManager.activatePipeline(pipeline.getId()); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); + } + + @Test + public void testDeactivatePipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + // Change to follower + pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + try { + pipelineManager.deactivatePipeline(pipeline.getId()); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); + } + @Test public void testRemovePipeline() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); // Create a pipeline Pipeline pipeline = pipelineManager.createPipeline( @@ -206,13 +291,34 @@ public void testRemovePipeline() throws Exception { pipelineManager.close(); } + @Test + public void testClosePipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); + pipelineManager.allowPipelineCreation(); + Pipeline pipeline = pipelineManager.createPipeline( + HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); + Assert.assertEquals(1, pipelineManager.getPipelines().size()); + Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); + Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); + // Change to follower + pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + try { + pipelineManager.closePipeline(pipeline, false); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); + } + @Test public void testPipelineReport() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager, - eventQueue); + new EventQueue()); Pipeline pipeline = pipelineManager .createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); @@ -258,7 +364,7 @@ public void testPipelineReport() throws Exception { @Test public void testPipelineCreationFailedMetric() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); // No pipeline at start @@ -313,7 +419,7 @@ public void testPipelineCreationFailedMetric() throws Exception { @Test public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); pipelineManager.onMessage( @@ -324,13 +430,13 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { // close manager pipelineManager.close(); // new pipeline manager loads the pipelines from the db in ALLOCATED state - pipelineManager = createPipelineManager(); + pipelineManager = createPipelineManager(true); Assert.assertEquals(Pipeline.PipelineState.ALLOCATED, pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager(new OzoneConfiguration(), - new ArrayList<>(), pipelineManager, eventQueue); + new ArrayList<>(), pipelineManager, new EventQueue()); PipelineReportHandler pipelineReportHandler = new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf); @@ -362,7 +468,7 @@ public void testScrubPipeline() throws Exception { OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, TimeUnit.MILLISECONDS); - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); Pipeline pipeline = pipelineManager .createPipeline(HddsProtos.ReplicationType.RATIS, @@ -388,6 +494,14 @@ public void testScrubPipeline() throws Exception { pipelineManager.close(); } + @Test (expected = NotLeaderException.class) + public void testScrubPipelineShouldFailOnFollower() throws Exception { + PipelineManagerV2Impl pipelineManager = createPipelineManager(false); + pipelineManager.allowPipelineCreation(); + pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + } + @Test public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception { // No timeout for pipeline scrubber. @@ -395,7 +509,7 @@ public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception { OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, TimeUnit.MILLISECONDS); - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); try { pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); @@ -433,7 +547,7 @@ public void testSafeModeUpdatedOnSafemodeExit() throws Exception { OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, TimeUnit.MILLISECONDS); - PipelineManagerV2Impl pipelineManager = createPipelineManager(); + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); Assert.assertTrue(pipelineManager.getSafeModeStatus()); Assert.assertFalse(pipelineManager.isPipelineCreationAllowed()); // First pass pre-check as true, but safemode still on @@ -456,6 +570,6 @@ private void sendPipelineReport( boolean isLeader) { SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode report = TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), isLeader); - pipelineReportHandler.onMessage(report, eventQueue); + pipelineReportHandler.onMessage(report, new EventQueue()); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 935dc7761022..0febf0630bb9 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -485,7 +485,7 @@ private void firePipelineEvent(PipelineManager pipelineManager, @Test - public void testDisableSafeMode() { + public void testDisableSafeMode() throws IOException { OzoneConfiguration conf = new OzoneConfiguration(config); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false); PipelineManager pipelineManager = Mockito.mock(PipelineManager.class); From 3ed29d88202d20873f9f2b7f4e9635ee954bdaf8 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Sat, 24 Oct 2020 20:58:07 +0530 Subject: [PATCH 17/51] HDDS-4059. SCMStateMachine::applyTransaction() should not invoke TransactionContext.getClientRequest(). --- .../java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java index b10dd549587e..ee26e58ee392 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java @@ -54,7 +54,7 @@ public CompletableFuture applyTransaction( new CompletableFuture<>(); try { final SCMRatisRequest request = SCMRatisRequest.decode( - trx.getClientRequest().getMessage()); + Message.valueOf(trx.getStateMachineLogEntry().getLogData())); applyTransactionFuture.complete(process(request)); } catch (Exception ex) { applyTransactionFuture.completeExceptionally(ex); From d482abf62fa1ec72da8de0007535b82466a1be57 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Sat, 24 Oct 2020 20:59:18 +0530 Subject: [PATCH 18/51] HDDS-4125. Pipeline is not removed when a datanode goes stale. --- .../hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 069540ce261e..4690f29e14cb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -406,10 +406,7 @@ public void closePipeline(Pipeline pipeline, boolean onTimeout) public void scrubPipeline(ReplicationType type, ReplicationFactor factor) throws IOException { checkLeader(); - if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) { - // Only srub pipeline for RATIS THREE pipeline - return; - } + Instant currentTime = Instant.now(); Long pipelineScrubTimeoutInMills = conf.getTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, From a70964ec84ac04ca3f0dbad9dd7b43f0f0807707 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Sat, 24 Oct 2020 21:04:32 +0530 Subject: [PATCH 19/51] HDDS-4130. remove the 1st edition of RatisServer of SCM HA which is copied from OM HA. --- .../apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 14 -- .../scm/server/StorageContainerManager.java | 78 +--------- .../scm/server/ratis/SCMStateMachine.java | 6 +- .../scm/server/ratis/TestSCMRatisServer.java | 147 ------------------ .../scm/server/ratis/TestSCMStateMachine.java | 120 -------------- 5 files changed, 5 insertions(+), 360 deletions(-) delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java delete mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index 0f71744d64de..48946b487913 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -19,13 +19,9 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.ScmUtils; -import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; -import java.io.File; import java.util.Collection; /** @@ -42,16 +38,6 @@ public static boolean isSCMHAEnabled(ConfigurationSource conf) { ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); } - public static File createSCMRatisDir(ConfigurationSource conf) - throws IllegalArgumentException { - String scmRatisDir = SCMRatisServer.getSCMRatisDirectory(conf); - if (scmRatisDir == null || scmRatisDir.isEmpty()) { - throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS + - " must be defined."); - } - return ScmUtils.createSCMDir(scmRatisDir); - } - /** * Get a collection of all scmNodeIds for the given scmServiceId. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index cbd713c7cc5e..b17729bf62d0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -31,13 +31,11 @@ import com.google.common.cache.RemovalListener; import com.google.protobuf.BlockingService; -import java.io.File; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.Collection; import java.util.HashMap; import java.util.Map; -import java.util.Collections; import java.util.Objects; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; @@ -54,10 +52,6 @@ import org.apache.hadoop.hdds.scm.PlacementPolicy; import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl; -import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; -import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; -import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer; -import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisSnapshotInfo; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdds.scm.ScmConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -128,7 +122,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT; import org.apache.ratis.grpc.GrpcTlsConfig; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -202,11 +195,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl private CertificateServer certificateServer; private GrpcTlsConfig grpcTlsConfig; - // SCM HA related - private SCMRatisServer scmRatisServer; - private SCMRatisSnapshotInfo scmRatisSnapshotInfo; - private File scmRatisSnapshotDir; - private JvmPauseMonitor jvmPauseMonitor; private final OzoneConfiguration configuration; private SCMContainerMetrics scmContainerMetrics; @@ -276,10 +264,6 @@ private StorageContainerManager(OzoneConfiguration conf, loginAsSCMUser(conf); } - this.scmRatisSnapshotInfo = new SCMRatisSnapshotInfo( - scmStorageConfig.getCurrentDir()); - this.scmRatisSnapshotDir = SCMHAUtils.createSCMRatisDir(conf); - // Creates the SCM DBs or opens them if it exists. // A valid pointer to the store is required by all the other services below. initalizeMetadataStore(conf, configurator); @@ -400,13 +384,7 @@ private StorageContainerManager(OzoneConfiguration conf, public static StorageContainerManager createSCM( OzoneConfiguration conf, SCMConfigurator configurator) throws IOException, AuthenticationException { - StorageContainerManager scm = new StorageContainerManager( - conf, configurator); - if (SCMHAUtils.isSCMHAEnabled(conf) && scm.getScmRatisServer() == null) { - SCMRatisServer scmRatisServer = initializeRatisServer(conf, scm); - scm.setScmRatisServer(scmRatisServer); - } - return scm; + return new StorageContainerManager(conf, configurator); } /** @@ -840,10 +818,6 @@ public void start() throws IOException { getClientRpcAddress())); } - if (scmRatisServer != null) { - scmRatisServer.start(); - } - scmHAManager.start(); ms = HddsServerUtil @@ -1180,56 +1154,6 @@ public NetworkTopology getClusterMap() { return this.clusterMap; } - private static SCMRatisServer initializeRatisServer( - OzoneConfiguration conf, StorageContainerManager scm) throws IOException { - SCMNodeDetails scmNodeDetails = SCMNodeDetails - .initStandAlone(conf); - //TODO enable Ratis group - SCMRatisServer scmRatisServer = SCMRatisServer.newSCMRatisServer( - conf.getObject(SCMRatisServer.SCMRatisServerConfiguration.class), - scm, scmNodeDetails, Collections.EMPTY_LIST, - SCMRatisServer.getSCMRatisDirectory(conf)); - if (scmRatisServer != null) { - LOG.info("SCM Ratis server initialized at port {}", - scmRatisServer.getServerPort()); - } // TODO error handling for scmRatisServer creation failure - return scmRatisServer; - } - - @VisibleForTesting - public SCMRatisServer getScmRatisServer() { - return scmRatisServer; - } - - public void setScmRatisServer(SCMRatisServer scmRatisServer) { - this.scmRatisServer = scmRatisServer; - } - - @VisibleForTesting - public SCMRatisSnapshotInfo getSnapshotInfo() { - return scmRatisSnapshotInfo; - } - - @VisibleForTesting - public long getRatisSnapshotIndex() { - return scmRatisSnapshotInfo.getIndex(); - } - - /** - * Save ratis snapshot to SCM meta store and local disk. - */ - public TermIndex saveRatisSnapshot() throws IOException { - TermIndex snapshotIndex = scmRatisServer.getLastAppliedTermIndex(); - if (scmMetadataStore != null) { - // Flush the SCM state to disk - scmMetadataStore.getStore().flushDB(); - } - - scmRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex); - - return snapshotIndex; - } - /** * Get the safe mode status of all rules. * diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java index 144380a620c1..9a725a673b39 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java @@ -73,7 +73,8 @@ public SCMStateMachine(SCMRatisServer ratisServer) { this.scmRatisServer = ratisServer; this.scm = ratisServer.getSCM(); - this.snapshotInfo = scm.getSnapshotInfo(); + // TODO: remove the whole file later + this.snapshotInfo = null; updateLastAppliedIndexWithSnaphsotIndex(); ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true) @@ -158,7 +159,8 @@ public void unpause(long newLastAppliedSnaphsotIndex, public long takeSnapshot() throws IOException { LOG.info("Saving Ratis snapshot on the SCM."); if (scm != null) { - return scm.saveRatisSnapshot().getIndex(); + // TODO: remove the whole file later + return 0; } return 0; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java deleted file mode 100644 index 8a233aeca13e..000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hdds.scm.server.ratis; - -import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.util.LifeCycle; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Collections; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Test class for SCM Ratis Server. - */ -public class TestSCMRatisServer { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private OzoneConfiguration conf; - private SCMRatisServer scmRatisServer; - private StorageContainerManager scm; - private String scmId; - private static final long LEADER_ELECTION_TIMEOUT = 500L; - - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - scmId = UUID.randomUUID().toString(); - conf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test"); - - // Standalone SCM Ratis server - initSCM(); - scm = TestUtils.getScm(conf); - scm.start(); - scmRatisServer = scm.getScmRatisServer(); - } - - @After - public void shutdown() { - if (scmRatisServer != null) { - scmRatisServer.stop(); - } - if (scm != null) { - scm.stop(); - } - } - - @Test - public void testStartSCMRatisServer() { - Assert.assertEquals("Ratis Server should be in running state", - LifeCycle.State.RUNNING, scmRatisServer.getServerState()); - } - - @Test - public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws - Exception { - String customScmServiceId = "scmIdCustom123"; - OzoneConfiguration newConf = new OzoneConfiguration(); - String newOmId = UUID.randomUUID().toString(); - String path = GenericTestUtils.getTempPath(newOmId); - Path metaDirPath = Paths.get(path, "scm-meta"); - newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - newConf.setTimeDuration( - ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY, - LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS); - int ratisPort = 9873; - InetSocketAddress rpcAddress = new InetSocketAddress( - InetAddress.getLocalHost(), 0); - SCMNodeDetails nodeDetails = new SCMNodeDetails.Builder() - .setRpcAddress(rpcAddress) - .setRatisPort(ratisPort) - .setSCMNodeId(newOmId) - .setSCMServiceId(customScmServiceId) - .build(); - // Starts a single node Ratis server - scmRatisServer.stop(); - SCMRatisServer newScmRatisServer = SCMRatisServer - .newSCMRatisServer(newConf.getObject(SCMRatisServer - .SCMRatisServerConfiguration.class), scm, nodeDetails, - Collections.emptyList(), - SCMRatisServer.getSCMRatisDirectory(newConf)); - newScmRatisServer.start(); - - UUID uuid = UUID.nameUUIDFromBytes(customScmServiceId.getBytes()); - RaftGroupId raftGroupId = newScmRatisServer.getRaftGroup().getGroupId(); - Assert.assertEquals(uuid, raftGroupId.getUuid()); - Assert.assertEquals(raftGroupId.toByteString().size(), 16); - newScmRatisServer.stop(); - } - - private void initSCM() throws IOException { - String clusterId = UUID.randomUUID().toString(); - scmId = UUID.randomUUID().toString(); - - final String path = folder.newFolder().toString(); - Path scmPath = Paths.get(path, "scm-meta"); - Files.createDirectories(scmPath); - conf.set(OZONE_METADATA_DIRS, scmPath.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java deleted file mode 100644 index 0eddbdec0441..000000000000 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0 - *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ -package org.apache.hadoop.hdds.scm.server.ratis; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.TestUtils; -import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.TemporaryFolder; - -import java.io.IOException; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.UUID; - -import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; - -/** - * Test class for SCMStateMachine. - */ -public class TestSCMStateMachine { - @Rule - public TemporaryFolder folder = new TemporaryFolder(); - - private SCMStateMachine scmStateMachine; - private StorageContainerManager scm; - private SCMRatisServer scmRatisServer; - private OzoneConfiguration conf; - private String scmId; - @Before - public void init() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test"); - scmId = UUID.randomUUID().toString(); - - initSCM(); - scm = TestUtils.getScm(conf); - scm.start(); - scmRatisServer = scm.getScmRatisServer(); - scmStateMachine = scm.getScmRatisServer().getScmStateMachine(); - } - - @Test - public void testSCMUpdatedAppliedIndex(){ - // State machine should start with 0 term and 0 index. - scmStateMachine.notifyIndexUpdate(0, 0); - Assert.assertEquals(0, - scmStateMachine.getLastAppliedTermIndex().getTerm()); - Assert.assertEquals(0, - scmStateMachine.getLastAppliedTermIndex().getIndex()); - - // If only the transactionMap is updated, index should stay 0. - scmStateMachine.addApplyTransactionTermIndex(0, 1); - Assert.assertEquals(0L, - scmStateMachine.getLastAppliedTermIndex().getTerm()); - Assert.assertEquals(0L, - scmStateMachine.getLastAppliedTermIndex().getIndex()); - - // After the index update is notified, the index should increase. - scmStateMachine.notifyIndexUpdate(0, 1); - Assert.assertEquals(0L, - scmStateMachine.getLastAppliedTermIndex().getTerm()); - Assert.assertEquals(1L, - scmStateMachine.getLastAppliedTermIndex().getIndex()); - - // Only do a notifyIndexUpdate can also increase the index. - scmStateMachine.notifyIndexUpdate(0, 2); - Assert.assertEquals(0L, - scmStateMachine.getLastAppliedTermIndex().getTerm()); - Assert.assertEquals(2L, - scmStateMachine.getLastAppliedTermIndex().getIndex()); - - // If a larger index is notified, the index should not be updated. - scmStateMachine.notifyIndexUpdate(0, 5); - Assert.assertEquals(0L, - scmStateMachine.getLastAppliedTermIndex().getTerm()); - Assert.assertEquals(2L, - scmStateMachine.getLastAppliedTermIndex().getIndex()); - } - - private void initSCM() throws IOException { - String clusterId = UUID.randomUUID().toString(); - final String path = folder.newFolder().toString(); - Path scmPath = Paths.get(path, "scm-meta"); - Files.createDirectories(scmPath); - conf.set(OZONE_METADATA_DIRS, scmPath.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - scmStore.setClusterId(clusterId); - scmStore.setScmId(scmId); - // writes the version file properties - scmStore.initialize(); - } - - @After - public void cleanup() { - scm.stop(); - } -} From 9e0dd848d7a363a64733518335fbfb5e8d47a20f Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Sat, 24 Oct 2020 21:08:33 +0530 Subject: [PATCH 20/51] HDDS-3895. Implement container related operations in ContainerManagerImpl. --- .../hdds/scm/container/ContainerID.java | 32 ++- .../hdds/scm/container/ContainerInfo.java | 7 +- .../container/common/helpers/ExcludeList.java | 2 +- .../src/main/proto/hdds.proto | 7 + .../DatanodeDeletedBlockTransactions.java | 2 +- .../hdds/scm/block/DeletedBlockLogImpl.java | 2 +- .../AbstractContainerReportHandler.java | 2 +- .../container/ContainerActionsHandler.java | 2 +- .../scm/container/ContainerManagerImpl.java | 175 ++++++++-------- .../scm/container/ContainerManagerV2.java | 65 ++---- .../scm/container/ContainerReportHandler.java | 6 +- .../scm/container/ContainerStateManager.java | 36 ++-- .../container/ContainerStateManagerImpl.java | 194 ++++++++++-------- .../container/ContainerStateManagerV2.java | 40 +++- .../IncrementalContainerReportHandler.java | 2 +- .../scm/container/SCMContainerManager.java | 13 +- .../container/states/ContainerAttribute.java | 2 +- .../container/states/ContainerStateMap.java | 172 ++++++++-------- .../hdds/scm/metadata/ContainerIDCodec.java | 4 +- .../scm/server/SCMClientProtocolServer.java | 20 +- .../scm/server/StorageContainerManager.java | 2 +- .../hdds/scm/block/TestDeletedBlockLog.java | 7 +- .../TestCloseContainerEventHandler.java | 4 +- .../TestContainerActionsHandler.java | 2 +- .../container/TestContainerManagerImpl.java | 30 ++- .../container/TestSCMContainerManager.java | 2 +- .../states/TestContainerAttribute.java | 18 +- .../hdds/scm/node/TestDeadNodeHandler.java | 10 +- .../node/states/TestNode2ContainerMap.java | 10 +- .../scm/pipeline/TestPipelineManagerImpl.java | 4 +- .../pipeline/TestPipelineStateManager.java | 22 +- .../scm/pipeline/TestSCMPipelineManager.java | 4 +- .../ozone/client/io/KeyOutputStream.java | 2 +- .../TestContainerStateManagerIntegration.java | 8 +- .../TestSCMContainerManagerMetrics.java | 6 +- .../apache/hadoop/ozone/OzoneTestUtils.java | 10 +- .../rpc/TestContainerReplicationEndToEnd.java | 6 +- .../rpc/TestFailureHandlingByClient.java | 10 +- ...TestFailureHandlingByClientFlushDelay.java | 2 +- .../TestMultiBlockWritesWithDnFailures.java | 4 +- .../TestOzoneClientRetriesOnException.java | 4 +- ...oneClientRetriesOnExceptionFlushDelay.java | 2 +- .../rpc/TestOzoneRpcClientAbstract.java | 2 +- .../ozone/client/rpc/TestReadRetries.java | 2 +- .../hadoop/ozone/container/TestHelper.java | 6 +- .../TestCloseContainerByPipeline.java | 8 +- .../TestCloseContainerHandler.java | 2 +- .../TestDeleteContainerHandler.java | 2 +- .../ozone/dn/scrubber/TestDataScrubber.java | 2 +- .../ozone/om/TestContainerReportWithKeys.java | 2 +- .../ozone/recon/TestReconAsPassiveScm.java | 2 +- .../hadoop/ozone/scm/TestSCMMXBean.java | 12 +- .../ozone/recon/api/ContainerEndpoint.java | 4 +- .../ozone/recon/fsck/ContainerHealthTask.java | 2 +- .../recon/scm/ReconContainerManager.java | 2 +- .../scm/ReconContainerReportHandler.java | 2 +- ...econIncrementalContainerReportHandler.java | 2 +- .../recon/api/TestContainerEndpoint.java | 2 +- .../recon/fsck/TestContainerHealthStatus.java | 2 +- .../recon/fsck/TestContainerHealthTask.java | 24 +-- ...estContainerHealthTaskRecordGenerator.java | 2 +- .../AbstractReconContainerManagerTest.java | 6 +- .../recon/scm/TestReconContainerManager.java | 2 +- ...econIncrementalContainerReportHandler.java | 2 +- 64 files changed, 563 insertions(+), 483 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java index bb44da4e78e5..1a6be9660ce0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java @@ -23,6 +23,7 @@ import org.apache.commons.lang3.builder.CompareToBuilder; import org.apache.commons.lang3.builder.EqualsBuilder; import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; /** * Container ID is an integer that is a value between 1..MAX_CONTAINER ID. @@ -34,13 +35,14 @@ public final class ContainerID implements Comparable { private final long id; - // TODO: make this private. /** * Constructs ContainerID. * * @param id int */ - public ContainerID(long id) { + private ContainerID(long id) { + Preconditions.checkState(id > 0, + "Container ID should be a positive. %s.", id); this.id = id; } @@ -49,9 +51,7 @@ public ContainerID(long id) { * @param containerID long * @return ContainerID. */ - public static ContainerID valueof(final long containerID) { - Preconditions.checkState(containerID > 0, - "Container ID should be a positive long. "+ containerID); + public static ContainerID valueOf(final long containerID) { return new ContainerID(containerID); } @@ -60,14 +60,30 @@ public static ContainerID valueof(final long containerID) { * * @return int */ + @Deprecated + /* + * Don't expose the int value. + */ public long getId() { return id; } + /** + * Use proto message. + */ + @Deprecated public byte[] getBytes() { return Longs.toByteArray(id); } + public HddsProtos.ContainerID getProtobuf() { + return HddsProtos.ContainerID.newBuilder().setId(id).build(); + } + + public static ContainerID getFromProtobuf(HddsProtos.ContainerID proto) { + return ContainerID.valueOf(proto.getId()); + } + @Override public boolean equals(final Object o) { if (this == o) { @@ -81,14 +97,14 @@ public boolean equals(final Object o) { final ContainerID that = (ContainerID) o; return new EqualsBuilder() - .append(getId(), that.getId()) + .append(id, that.id) .isEquals(); } @Override public int hashCode() { return new HashCodeBuilder(61, 71) - .append(getId()) + .append(id) .toHashCode(); } @@ -96,7 +112,7 @@ public int hashCode() { public int compareTo(final ContainerID that) { Preconditions.checkNotNull(that); return new CompareToBuilder() - .append(this.getId(), that.getId()) + .append(this.id, that.id) .build(); } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java index b8f1a926f186..e621a4f54eac 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java @@ -121,6 +121,11 @@ public static ContainerInfo fromProtobuf(HddsProtos.ContainerInfoProto info) { .build(); } + /** + * This method is depricated, use {@code containerID()} which returns + * {@link ContainerID} object. + */ + @Deprecated public long getContainerID() { return containerID; } @@ -179,7 +184,7 @@ public void updateSequenceId(long sequenceID) { } public ContainerID containerID() { - return new ContainerID(getContainerID()); + return ContainerID.valueOf(containerID); } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java index 803aa0367045..824a1f5833ab 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java @@ -91,7 +91,7 @@ public static ExcludeList getFromProtoBuf( HddsProtos.ExcludeListProto excludeListProto) { ExcludeList excludeList = new ExcludeList(); excludeListProto.getContainerIdsList().forEach(id -> { - excludeList.addConatinerId(ContainerID.valueof(id)); + excludeList.addConatinerId(ContainerID.valueOf(id)); }); DatanodeDetails.Builder builder = DatanodeDetails.newBuilder(); excludeListProto.getDatanodesList().forEach(dn -> { diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index b43a74cd0679..d89e7b4c2140 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -83,6 +83,10 @@ message PipelineID { optional UUID uuid128 = 100; } +message ContainerID { + required uint64 id = 1; +} + enum PipelineState { PIPELINE_ALLOCATED = 1; PIPELINE_OPEN = 2; @@ -181,6 +185,7 @@ enum LifeCycleEvent { } message ContainerInfoProto { + // Replace int64 with ContainerID message required int64 containerID = 1; required LifeCycleState state = 2; optional PipelineID pipelineID = 3; @@ -236,6 +241,7 @@ enum ScmOps { message ExcludeListProto { repeated string datanodes = 1; + // Replace int64 with ContainerID message repeated int64 containerIds = 2; repeated PipelineID pipelineIds = 3; } @@ -244,6 +250,7 @@ message ExcludeListProto { * Block ID that uniquely identify a block by SCM. */ message ContainerBlockID { + // Replace int64 with ContainerID message required int64 containerID = 1; required int64 localID = 2; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index dca152900cb6..2420d610c06f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -59,7 +59,7 @@ public boolean addTransaction(DeletedBlocksTransaction tx, Set dnsWithTransactionCommitted) { try { boolean success = false; - final ContainerID id = ContainerID.valueof(tx.getContainerID()); + final ContainerID id = ContainerID.valueOf(tx.getContainerID()); final ContainerInfo container = containerManager.getContainer(id); final Set replicas = containerManager .getContainerReplicas(id); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index edd3d4aec096..5d43a7532338 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -193,7 +193,7 @@ public void commitTransactions( long txID = transactionResult.getTxID(); // set of dns which have successfully committed transaction txId. dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID); - final ContainerID containerId = ContainerID.valueof( + final ContainerID containerId = ContainerID.valueOf( transactionResult.getContainerID()); if (dnsWithCommittedTxn == null) { // Mostly likely it's a retried delete command response. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java index 1b190a22da1b..02dc3f50aeff 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java @@ -75,7 +75,7 @@ protected void processContainerReplica(final DatanodeDetails datanodeDetails, final ContainerReplicaProto replicaProto, final EventPublisher publisher) throws IOException { final ContainerID containerId = ContainerID - .valueof(replicaProto.getContainerID()); + .valueOf(replicaProto.getContainerID()); if (logger.isDebugEnabled()) { logger.debug("Processing replica of container {} from datanode {}", diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java index e79f268974cf..3d53e292172c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java @@ -45,7 +45,7 @@ public void onMessage( DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails(); for (ContainerAction action : containerReportFromDatanode.getReport() .getContainerActionsList()) { - ContainerID containerId = ContainerID.valueof(action.getContainerID()); + ContainerID containerId = ContainerID.valueOf(action.getContainerID()); switch (action.getAction()) { case CLOSE: if (LOG.isDebugEnabled()) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index 36b9a308e8fe..3477eea2c758 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -23,12 +23,14 @@ import java.util.List; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; @@ -37,7 +39,9 @@ import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; +import org.apache.hadoop.hdds.utils.UniqueId; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.util.Time; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,6 +64,7 @@ public class ContainerManagerImpl implements ContainerManagerV2 { /** * */ + //Can we move this lock to ContainerStateManager? private final ReadWriteLock lock; /** @@ -93,94 +98,45 @@ public ContainerManagerImpl( } @Override - public Set getContainerIDs() { - lock.readLock().lock(); - try { - return containerStateManager.getContainerIDs(); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public Set getContainers() { - lock.readLock().lock(); - try { - return containerStateManager.getContainerIDs().stream().map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException e) { - // How can this happen? o_O - return null; - } - }).filter(Objects::nonNull).collect(Collectors.toSet()); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public ContainerInfo getContainer(final ContainerID containerID) + public ContainerInfo getContainer(final ContainerID id) throws ContainerNotFoundException { lock.readLock().lock(); try { - return containerStateManager.getContainer(containerID); - } finally { - lock.readLock().unlock(); - } - } - - @Override - public Set getContainers(final LifeCycleState state) { - lock.readLock().lock(); - try { - return containerStateManager.getContainerIDs(state).stream().map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException e) { - // How can this happen? o_O - return null; - } - }).filter(Objects::nonNull).collect(Collectors.toSet()); + return Optional.ofNullable(containerStateManager + .getContainer(id.getProtobuf())) + .orElseThrow(() -> new ContainerNotFoundException("ID " + id)); } finally { lock.readLock().unlock(); } } @Override - public boolean exists(final ContainerID containerID) { + public List listContainers(final ContainerID startID, + final int count) { lock.readLock().lock(); try { - return (containerStateManager.getContainer(containerID) != null); - } catch (ContainerNotFoundException ex) { - return false; + final long start = startID == null ? 0 : startID.getId(); + final List containersIds = + new ArrayList<>(containerStateManager.getContainerIDs()); + Collections.sort(containersIds); + return containersIds.stream() + .filter(id -> id.getId() > start).limit(count) + .map(ContainerID::getProtobuf) + .map(containerStateManager::getContainer) + .collect(Collectors.toList()); } finally { lock.readLock().unlock(); } } @Override - public List listContainers(final ContainerID startID, - final int count) { + public List listContainers(final LifeCycleState state) { lock.readLock().lock(); try { - final long startId = startID == null ? 0 : startID.getId(); - final List containersIds = - new ArrayList<>(containerStateManager.getContainerIDs()); - Collections.sort(containersIds); - return containersIds.stream() - .filter(id -> id.getId() > startId) - .limit(count) - .map(id -> { - try { - return containerStateManager.getContainer(id); - } catch (ContainerNotFoundException ex) { - // This can never happen, as we hold lock no one else can remove - // the container after we got the container ids. - LOG.warn("Container Missing.", ex); - return null; - } - }).collect(Collectors.toList()); + return containerStateManager.getContainerIDs(state).stream() + .map(ContainerID::getProtobuf) + .map(containerStateManager::getContainer) + .filter(Objects::nonNull).collect(Collectors.toList()); } finally { lock.readLock().unlock(); } @@ -201,8 +157,8 @@ public ContainerInfo allocateContainer(final ReplicationType type, replicationFactor + ", State:PipelineState.OPEN"); } - final ContainerID containerID = containerStateManager - .getNextContainerID(); + // TODO: Replace this with Distributed unique id generator. + final ContainerID containerID = ContainerID.valueOf(UniqueId.next()); final Pipeline pipeline = pipelines.get( (int) containerID.getId() % pipelines.size()); @@ -222,43 +178,65 @@ public ContainerInfo allocateContainer(final ReplicationType type, if (LOG.isTraceEnabled()) { LOG.trace("New container allocated: {}", containerInfo); } - return containerStateManager.getContainer(containerID); + return containerStateManager.getContainer(containerID.getProtobuf()); } finally { lock.writeLock().unlock(); } } @Override - public void deleteContainer(final ContainerID containerID) - throws ContainerNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); - } - - @Override - public void updateContainerState(final ContainerID containerID, + public void updateContainerState(final ContainerID id, final LifeCycleEvent event) - throws ContainerNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); + throws IOException, InvalidStateTransitionException { + final HddsProtos.ContainerID cid = id.getProtobuf(); + lock.writeLock().lock(); + try { + checkIfContainerExist(cid); + containerStateManager.updateContainerState(cid, event); + } finally { + lock.writeLock().unlock(); + } } @Override - public Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); + public Set getContainerReplicas(final ContainerID id) + throws ContainerNotFoundException { + lock.readLock().lock(); + try { + return Optional.ofNullable(containerStateManager + .getContainerReplicas(id.getProtobuf())) + .orElseThrow(() -> new ContainerNotFoundException("ID " + id)); + } finally { + lock.readLock().unlock(); + } } @Override - public void updateContainerReplica(final ContainerID containerID, + public void updateContainerReplica(final ContainerID id, final ContainerReplica replica) throws ContainerNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); + final HddsProtos.ContainerID cid = id.getProtobuf(); + lock.writeLock().lock(); + try { + checkIfContainerExist(cid); + containerStateManager.updateContainerReplica(cid, replica); + } finally { + lock.writeLock().unlock(); + } } @Override - public void removeContainerReplica(final ContainerID containerID, + public void removeContainerReplica(final ContainerID id, final ContainerReplica replica) throws ContainerNotFoundException, ContainerReplicaNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); + final HddsProtos.ContainerID cid = id.getProtobuf(); + lock.writeLock().lock(); + try { + checkIfContainerExist(cid); + containerStateManager.removeContainerReplica(cid, replica); + } finally { + lock.writeLock().unlock(); + } } @Override @@ -279,6 +257,27 @@ public void notifyContainerReportProcessing(final boolean isFullReport, throw new UnsupportedOperationException("Not yet implemented!"); } + @Override + public void deleteContainer(final ContainerID id) + throws IOException { + final HddsProtos.ContainerID cid = id.getProtobuf(); + lock.writeLock().lock(); + try { + checkIfContainerExist(cid); + containerStateManager.removeContainer(cid); + } finally { + lock.writeLock().unlock(); + } + } + + private void checkIfContainerExist(final HddsProtos.ContainerID id) + throws ContainerNotFoundException { + if (!containerStateManager.contains(id)) { + throw new ContainerNotFoundException("Container with id #" + + id.getId() + " not found."); + } + } + @Override public void close() throws Exception { containerStateManager.close(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java index 863ca4da66b9..dcedb6cedac3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hdds.scm.container; import java.io.IOException; -import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -27,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; /** * TODO: Add extensive javadoc. @@ -38,26 +38,6 @@ public interface ContainerManagerV2 extends AutoCloseable { // TODO: Rename this to ContainerManager - /** - * Returns all the container Ids managed by ContainerManager. - * - * @return Set of ContainerID - */ - Set getContainerIDs(); - - /** - * Returns all the containers managed by ContainerManager. - * - * @return List of ContainerInfo - */ - Set getContainers(); - - /** - * Returns all the containers which are in the specified state. - * - * @return List of ContainerInfo - */ - Set getContainers(LifeCycleState state); /** * Returns the ContainerInfo from the container ID. @@ -66,8 +46,6 @@ public interface ContainerManagerV2 extends AutoCloseable { ContainerInfo getContainer(ContainerID containerID) throws ContainerNotFoundException; - boolean exists(ContainerID containerID); - /** * Returns containers under certain conditions. * Search container IDs from start ID(exclusive), @@ -84,6 +62,14 @@ ContainerInfo getContainer(ContainerID containerID) */ List listContainers(ContainerID startID, int count); + + /** + * Returns all the containers which are in the specified state. + * + * @return List of ContainerInfo + */ + List listContainers(LifeCycleState state); + /** * Allocates a new container for a given keyName and replication factor. * @@ -96,24 +82,16 @@ ContainerInfo allocateContainer(ReplicationType type, ReplicationFactor replicationFactor, String owner) throws IOException; - /** - * Deletes a container from SCM. - * - * @param containerID - Container ID - * @throws IOException - */ - void deleteContainer(ContainerID containerID) - throws ContainerNotFoundException; - /** * Update container state. * @param containerID - Container ID * @param event - container life cycle event * @throws IOException + * @throws InvalidStateTransitionException */ void updateContainerState(ContainerID containerID, LifeCycleEvent event) - throws ContainerNotFoundException; + throws IOException, InvalidStateTransitionException; /** * Returns the latest list of replicas for given containerId. @@ -153,18 +131,6 @@ void removeContainerReplica(ContainerID containerID, ContainerReplica replica) void updateDeleteTransactionId(Map deleteTransactionMap) throws IOException; - /** - * Returns ContainerInfo which matches the requirements. - * @param size - the amount of space required in the container - * @param owner - the user which requires space in its owned container - * @param pipeline - pipeline to which the container should belong - * @return ContainerInfo for the matching container. - */ - default ContainerInfo getMatchingContainer(long size, String owner, - Pipeline pipeline) { - return getMatchingContainer(size, owner, pipeline, Collections.emptyList()); - } - /** * Returns ContainerInfo which matches the requirements. * @param size - the amount of space required in the container @@ -185,4 +151,13 @@ ContainerInfo getMatchingContainer(long size, String owner, */ // Is it possible to remove this from the Interface? void notifyContainerReportProcessing(boolean isFullReport, boolean success); + + /** + * Deletes a container from SCM. + * + * @param containerID - Container ID + * @throws IOException + */ + void deleteContainer(ContainerID containerID) + throws IOException; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java index 7bca64f635b5..18dffe72895d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java @@ -120,7 +120,7 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, final Set containersInDn = replicas.parallelStream() .map(ContainerReplicaProto::getContainerID) - .map(ContainerID::valueof).collect(Collectors.toSet()); + .map(ContainerID::valueOf).collect(Collectors.toSet()); final Set missingReplicas = new HashSet<>(containersInSCM); missingReplicas.removeAll(containersInDn); @@ -167,7 +167,7 @@ private void processContainerReplicas(final DatanodeDetails datanodeDetails, } else if (unknownContainerHandleAction.equals( UNKNOWN_CONTAINER_ACTION_DELETE)) { final ContainerID containerId = ContainerID - .valueof(replicaProto.getContainerID()); + .valueOf(replicaProto.getContainerID()); deleteReplica(containerId, datanodeDetails, publisher, "unknown"); } } catch (IOException e) { @@ -221,7 +221,7 @@ private void updateDeleteTransaction(final DatanodeDetails datanodeDetails, for (ContainerReplicaProto replica : replicas) { try { final ContainerInfo containerInfo = containerManager.getContainer( - ContainerID.valueof(replica.getContainerID())); + ContainerID.valueOf(replica.getContainerID())); if (containerInfo.getDeleteTransactionId() > replica.getDeleteTransactionId()) { pendingDeleteStatusList.addPendingDeleteStatus( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index e575c60566b1..0c3772f44825 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -329,7 +329,7 @@ public void addContainerInfo(long containerID, // In Recon, while adding a 'new' CLOSED container, pipeline will be a // random ID, and hence be passed down as null. pipelineManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); } containerStateCount.incrementAndGet(containerInfo.getState()); } @@ -371,12 +371,8 @@ void updateContainerState(final ContainerID containerID, void updateDeleteTransactionId( final Map deleteTransactionMap) { deleteTransactionMap.forEach((k, v) -> { - try { - containers.getContainerInfo(ContainerID.valueof(k)) - .updateDeleteTransactionId(v); - } catch (ContainerNotFoundException e) { - LOG.warn("Exception while updating delete transaction id.", e); - } + containers.getContainerInfo(ContainerID.valueOf(k)) + .updateDeleteTransactionId(v); }); } @@ -432,18 +428,13 @@ ContainerInfo getMatchingContainer(final long size, String owner, private ContainerInfo findContainerWithSpace(final long size, final NavigableSet searchSet, final String owner, final PipelineID pipelineID) { - try { - // Get the container with space to meet our request. - for (ContainerID id : searchSet) { - final ContainerInfo containerInfo = containers.getContainerInfo(id); - if (containerInfo.getUsedBytes() + size <= this.containerSize) { - containerInfo.updateLastUsedTime(); - return containerInfo; - } + // Get the container with space to meet our request. + for (ContainerID id : searchSet) { + final ContainerInfo containerInfo = containers.getContainerInfo(id); + if (containerInfo.getUsedBytes() + size <= this.containerSize) { + containerInfo.updateLastUsedTime(); + return containerInfo; } - } catch (ContainerNotFoundException e) { - // This should not happen! - LOG.warn("Exception while finding container with space", e); } return null; } @@ -496,7 +487,11 @@ NavigableSet getMatchingContainerIDs(final String owner, */ ContainerInfo getContainer(final ContainerID containerID) throws ContainerNotFoundException { - return containers.getContainerInfo(containerID); + final ContainerInfo container = containers.getContainerInfo(containerID); + if (container != null) { + return container; + } + throw new ContainerNotFoundException(containerID.toString()); } void close() throws IOException { @@ -540,6 +535,9 @@ void removeContainerReplica(final ContainerID containerID, void removeContainer(final ContainerID containerID) throws ContainerNotFoundException { + if (containers.getContainerInfo(containerID) == null) { + throw new ContainerNotFoundException(containerID.toString()); + } containers.removeContainer(containerID); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 4f4456ace47d..7f42a971cac4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -24,12 +24,12 @@ import java.util.NavigableSet; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; -import java.util.concurrent.atomic.AtomicLong; import com.google.common.base.Preconditions; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -47,15 +47,32 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.Table.KeyValue; import org.apache.hadoop.hdds.utils.db.TableIterator; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; import org.apache.hadoop.ozone.common.statemachine.StateMachine; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.QUASI_CLOSE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLOSE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FORCE_CLOSE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.DELETE; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLEANUP; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING; +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED; + /** * Default implementation of ContainerStateManager. This implementation * holds the Container States in-memory which is backed by a persistent store. * The persistent store is always kept in sync with the in-memory state changes. + * + * This class is NOT thread safe. All the calls are idempotent. */ public final class ContainerStateManagerImpl implements ContainerStateManagerV2 { @@ -71,13 +88,6 @@ public final class ContainerStateManagerImpl */ private final long containerSize; - /** - * The container ID sequence which is used to create new container. - * This will be removed once we have a Distributed Sequence ID Generator. - */ - @Deprecated - private final AtomicLong nextContainerID; - /** * In-memory representation of Container States. */ @@ -121,7 +131,6 @@ private ContainerStateManagerImpl(final Configuration conf, this.containerStore = containerStore; this.stateMachine = newStateMachine(); this.containerSize = getConfiguredContainerSize(conf); - this.nextContainerID = new AtomicLong(1L); this.containers = new ContainerStateMap(); this.lastUsedMap = new ConcurrentHashMap<>(); @@ -138,40 +147,45 @@ private StateMachine newStateMachine() { final Set finalStates = new HashSet<>(); // These are the steady states of a container. - finalStates.add(LifeCycleState.OPEN); - finalStates.add(LifeCycleState.CLOSED); - finalStates.add(LifeCycleState.DELETED); + finalStates.add(CLOSED); + finalStates.add(DELETED); final StateMachine containerLifecycleSM = - new StateMachine<>(LifeCycleState.OPEN, finalStates); - - containerLifecycleSM.addTransition(LifeCycleState.OPEN, - LifeCycleState.CLOSING, - LifeCycleEvent.FINALIZE); + new StateMachine<>(OPEN, finalStates); - containerLifecycleSM.addTransition(LifeCycleState.CLOSING, - LifeCycleState.QUASI_CLOSED, - LifeCycleEvent.QUASI_CLOSE); + containerLifecycleSM.addTransition(OPEN, CLOSING, FINALIZE); + containerLifecycleSM.addTransition(CLOSING, QUASI_CLOSED, QUASI_CLOSE); + containerLifecycleSM.addTransition(CLOSING, CLOSED, CLOSE); + containerLifecycleSM.addTransition(QUASI_CLOSED, CLOSED, FORCE_CLOSE); + containerLifecycleSM.addTransition(CLOSED, DELETING, DELETE); + containerLifecycleSM.addTransition(DELETING, DELETED, CLEANUP); - containerLifecycleSM.addTransition(LifeCycleState.CLOSING, - LifeCycleState.CLOSED, - LifeCycleEvent.CLOSE); - - containerLifecycleSM.addTransition(LifeCycleState.QUASI_CLOSED, - LifeCycleState.CLOSED, - LifeCycleEvent.FORCE_CLOSE); - - containerLifecycleSM.addTransition(LifeCycleState.CLOSED, - LifeCycleState.DELETING, - LifeCycleEvent.DELETE); - - containerLifecycleSM.addTransition(LifeCycleState.DELETING, - LifeCycleState.DELETED, - LifeCycleEvent.CLEANUP); + /* The following set of transitions are to make state machine + * transition idempotent. + */ + makeStateTransitionIdempotent(containerLifecycleSM, FINALIZE, + CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED); + makeStateTransitionIdempotent(containerLifecycleSM, QUASI_CLOSE, + QUASI_CLOSED, CLOSED, DELETING, DELETED); + makeStateTransitionIdempotent(containerLifecycleSM, CLOSE, + CLOSED, DELETING, DELETED); + makeStateTransitionIdempotent(containerLifecycleSM, FORCE_CLOSE, + CLOSED, DELETING, DELETED); + makeStateTransitionIdempotent(containerLifecycleSM, DELETE, + DELETING, DELETED); + makeStateTransitionIdempotent(containerLifecycleSM, CLEANUP, DELETED); return containerLifecycleSM; } + private void makeStateTransitionIdempotent( + final StateMachine sm, + final LifeCycleEvent event, final LifeCycleState... states) { + for (LifeCycleState state : states) { + sm.addTransition(state, state, event); + } + } + /** * Returns the configured container size. * @@ -197,28 +211,26 @@ private void initialize() throws IOException { final ContainerInfo container = iterator.next().getValue(); Preconditions.checkNotNull(container); containers.addContainer(container); - nextContainerID.set(Long.max(container.containerID().getId(), - nextContainerID.get())); if (container.getState() == LifeCycleState.OPEN) { try { pipelineManager.addContainerToPipeline(container.getPipelineID(), - ContainerID.valueof(container.getContainerID())); + container.containerID()); } catch (PipelineNotFoundException ex) { LOG.warn("Found container {} which is in OPEN state with " + "pipeline {} that does not exist. Marking container for " + "closing.", container, container.getPipelineID()); - updateContainerState(container.containerID(), - LifeCycleEvent.FINALIZE); + try { + updateContainerState(container.containerID().getProtobuf(), + LifeCycleEvent.FINALIZE); + } catch (InvalidStateTransitionException e) { + // This cannot happen. + LOG.warn("Unable to finalize Container {}.", container); + } } } } } - @Override - public ContainerID getNextContainerID() { - return ContainerID.valueof(nextContainerID.get()); - } - @Override public Set getContainerIDs() { return containers.getAllContainerIDs(); @@ -230,15 +242,9 @@ public Set getContainerIDs(final LifeCycleState state) { } @Override - public ContainerInfo getContainer(final ContainerID containerID) - throws ContainerNotFoundException { - return containers.getContainerInfo(containerID); - } - - @Override - public Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { - return containers.getContainerReplicas(containerID); + public ContainerInfo getContainer(final HddsProtos.ContainerID id) { + return containers.getContainerInfo( + ContainerID.getFromProtobuf(id)); } @Override @@ -254,32 +260,63 @@ public void addContainer(final ContainerInfoProto containerInfo) final ContainerID containerID = container.containerID(); final PipelineID pipelineID = container.getPipelineID(); - /* - * TODO: - * Check if the container already exist in in ContainerStateManager. - * This optimization can be done after moving ContainerNotFoundException - * from ContainerStateMap to ContainerManagerImpl. - */ + if (!containers.contains(containerID)) { + containerStore.put(containerID, container); + try { + containers.addContainer(container); + pipelineManager.addContainerToPipeline(pipelineID, containerID); + } catch (Exception ex) { + containers.removeContainer(containerID); + containerStore.delete(containerID); + throw ex; + } + } + } - containerStore.put(containerID, container); - containers.addContainer(container); - pipelineManager.addContainerToPipeline(pipelineID, containerID); - nextContainerID.incrementAndGet(); + @Override + public boolean contains(final HddsProtos.ContainerID id) { + // TODO: Remove the protobuf conversion after fixing ContainerStateMap. + return containers.contains(ContainerID.getFromProtobuf(id)); } - void updateContainerState(final ContainerID containerID, - final LifeCycleEvent event) - throws IOException { - throw new UnsupportedOperationException("Not yet implemented!"); + public void updateContainerState(final HddsProtos.ContainerID containerID, + final LifeCycleEvent event) + throws IOException, InvalidStateTransitionException { + // TODO: Remove the protobuf conversion after fixing ContainerStateMap. + final ContainerID id = ContainerID.getFromProtobuf(containerID); + if (containers.contains(id)) { + final ContainerInfo info = containers.getContainerInfo(id); + final LifeCycleState oldState = info.getState(); + final LifeCycleState newState = stateMachine.getNextState( + info.getState(), event); + if (newState.getNumber() > oldState.getNumber()) { + containers.updateState(id, info.getState(), newState); + } + } } - void updateContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException { - containers.updateContainerReplica(containerID, replica); + @Override + public Set getContainerReplicas( + final HddsProtos.ContainerID id) { + return containers.getContainerReplicas( + ContainerID.getFromProtobuf(id)); } + @Override + public void updateContainerReplica(final HddsProtos.ContainerID id, + final ContainerReplica replica) { + containers.updateContainerReplica(ContainerID.getFromProtobuf(id), + replica); + } + + @Override + public void removeContainerReplica(final HddsProtos.ContainerID id, + final ContainerReplica replica) { + containers.removeContainerReplica(ContainerID.getFromProtobuf(id), + replica); + + } void updateDeleteTransactionId( final Map deleteTransactionMap) { @@ -291,23 +328,14 @@ ContainerInfo getMatchingContainer(final long size, String owner, throw new UnsupportedOperationException("Not yet implemented!"); } - NavigableSet getMatchingContainerIDs(final String owner, final ReplicationType type, final ReplicationFactor factor, final LifeCycleState state) { throw new UnsupportedOperationException("Not yet implemented!"); } - void removeContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); - } - - - void removeContainer(final ContainerID containerID) - throws ContainerNotFoundException { - throw new UnsupportedOperationException("Not yet implemented!"); + public void removeContainer(final HddsProtos.ContainerID id) { + containers.removeContainer(ContainerID.getFromProtobuf(id)); } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java index 3520b0146e23..3a0cf2111f01 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java @@ -20,9 +20,11 @@ import java.io.IOException; import java.util.Set; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.metadata.Replicate; +import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException; /** * A ContainerStateManager is responsible for keeping track of all the @@ -94,10 +96,9 @@ public interface ContainerStateManagerV2 { ************************************************************************/ /** - * Returns a new container ID which can be used for allocating a new - * container. + * */ - ContainerID getNextContainerID(); + boolean contains(HddsProtos.ContainerID containerID); /** * Returns the ID of all the managed containers. @@ -114,14 +115,24 @@ public interface ContainerStateManagerV2 { /** * */ - ContainerInfo getContainer(ContainerID containerID) - throws ContainerNotFoundException; + ContainerInfo getContainer(HddsProtos.ContainerID id); + + /** + * + */ + Set getContainerReplicas(HddsProtos.ContainerID id); /** * */ - Set getContainerReplicas(ContainerID containerID) - throws ContainerNotFoundException; + void updateContainerReplica(HddsProtos.ContainerID id, + ContainerReplica replica); + + /** + * + */ + void removeContainerReplica(HddsProtos.ContainerID id, + ContainerReplica replica); /** * @@ -130,6 +141,21 @@ Set getContainerReplicas(ContainerID containerID) void addContainer(ContainerInfoProto containerInfo) throws IOException; + /** + * + */ + @Replicate + void updateContainerState(HddsProtos.ContainerID id, + HddsProtos.LifeCycleEvent event) + throws IOException, InvalidStateTransitionException; + + /** + * + */ + @Replicate + void removeContainer(HddsProtos.ContainerID containerInfo) + throws IOException; + /** * */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java index ed8756521b0e..3317f4255c52 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java @@ -71,7 +71,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, for (ContainerReplicaProto replicaProto : report.getReport().getReportList()) { try { - final ContainerID id = ContainerID.valueof( + final ContainerID id = ContainerID.valueOf( replicaProto.getContainerID()); if (!replicaProto.getState().equals( ContainerReplicaProto.State.DELETED)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java index 2117e70e13ed..f59e4014d9c3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java @@ -130,7 +130,7 @@ private void loadExistingContainers() throws IOException { try { if (container.getState() == LifeCycleState.OPEN) { pipelineManager.addContainerToPipeline(container.getPipelineID(), - ContainerID.valueof(container.getContainerID())); + ContainerID.valueOf(container.getContainerID())); } } catch (PipelineNotFoundException ex) { LOG.warn("Found a Container {} which is in {} state with pipeline {} " + @@ -216,7 +216,9 @@ public ContainerInfo getContainer(final ContainerID containerID) public boolean exists(ContainerID containerID) { lock.lock(); try { - return (containerStateManager.getContainer(containerID) != null); + Preconditions.checkNotNull( + containerStateManager.getContainer(containerID)); + return true; } catch (ContainerNotFoundException e) { return false; } finally { @@ -290,7 +292,7 @@ public ContainerInfo allocateContainer(final ReplicationType type, // PipelineStateManager. pipelineManager.removeContainerFromPipeline( containerInfo.getPipelineID(), - new ContainerID(containerInfo.getContainerID())); + containerInfo.containerID()); throw ex; } return containerInfo; @@ -404,7 +406,8 @@ public void updateDeleteTransactionId(Map deleteTransactionMap) try(BatchOperation batchOperation = batchHandler.initBatchOperation()) { for (Map.Entry< Long, Long > entry : deleteTransactionMap.entrySet()) { long containerID = entry.getKey(); - ContainerID containerIdObject = new ContainerID(containerID); + + ContainerID containerIdObject = ContainerID.valueOf(containerID); ContainerInfo containerInfo = containerStore.get(containerIdObject); ContainerInfo containerInfoInMem = containerStateManager @@ -493,7 +496,7 @@ protected void addContainerToDB(ContainerInfo containerInfo) throws IOException { try { containerStore - .put(new ContainerID(containerInfo.getContainerID()), containerInfo); + .put(containerInfo.containerID(), containerInfo); // Incrementing here, as allocateBlock to create a container calls // getMatchingContainer() and finally calls this API to add newly // created container to DB. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java index af44a8a043e5..61cff09daa7e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java @@ -153,7 +153,7 @@ public boolean hasContainerID(T key, ContainerID id) { * @return true or false */ public boolean hasContainerID(T key, int id) { - return hasContainerID(key, ContainerID.valueof(id)); + return hasContainerID(key, ContainerID.valueOf(id)); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index d71049b7052e..4d143e0db2f7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -18,31 +18,29 @@ package org.apache.hadoop.hdds.scm.container.states; +import java.util.Set; +import java.util.Collections; +import java.util.Map; +import java.util.NavigableSet; +import java.util.TreeSet; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; +import java.util.concurrent.ConcurrentHashMap; + import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.scm.container.ContainerID; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; + import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.Set; -import java.util.Collections; -import java.util.Map; -import java.util.NavigableSet; -import java.util.TreeSet; -import java.util.concurrent.locks.ReadWriteLock; -import java.util.concurrent.locks.ReentrantReadWriteLock; -import java.util.concurrent.ConcurrentHashMap; -import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes - .CONTAINER_EXISTS; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .FAILED_TO_CHANGE_CONTAINER_STATE; @@ -76,6 +74,8 @@ * select a container that belongs to user1, with Ratis replication which can * make 3 copies of data. The fact that we will look for open containers by * default and if we cannot find them we will add new containers. + * + * All the calls are idempotent. */ public class ContainerStateMap { private static final Logger LOG = @@ -95,6 +95,7 @@ public class ContainerStateMap { // Container State Map lock should be held before calling into // Update ContainerAttributes. The consistency of ContainerAttributes is // protected by this lock. + // Can we remove this lock? private final ReadWriteLock lock; /** @@ -120,56 +121,57 @@ public ContainerStateMap() { public void addContainer(final ContainerInfo info) throws SCMException { Preconditions.checkNotNull(info, "Container Info cannot be null"); - Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0, - "ExpectedReplicaCount should be greater than 0"); - lock.writeLock().lock(); try { final ContainerID id = info.containerID(); - if (containerMap.putIfAbsent(id, info) != null) { - LOG.debug("Duplicate container ID detected. {}", id); - throw new - SCMException("Duplicate container ID detected.", - CONTAINER_EXISTS); + if (!contains(id)) { + containerMap.put(id, info); + lifeCycleStateMap.insert(info.getState(), id); + ownerMap.insert(info.getOwner(), id); + factorMap.insert(info.getReplicationFactor(), id); + typeMap.insert(info.getReplicationType(), id); + replicaMap.put(id, ConcurrentHashMap.newKeySet()); + + // Flush the cache of this container type, will be added later when + // get container queries are executed. + flushCache(info); + LOG.trace("Container {} added to ContainerStateMap.", id); } - - lifeCycleStateMap.insert(info.getState(), id); - ownerMap.insert(info.getOwner(), id); - factorMap.insert(info.getReplicationFactor(), id); - typeMap.insert(info.getReplicationType(), id); - replicaMap.put(id, ConcurrentHashMap.newKeySet()); - - // Flush the cache of this container type, will be added later when - // get container queries are executed. - flushCache(info); - LOG.trace("Created container with {} successfully.", id); } finally { lock.writeLock().unlock(); } } + public boolean contains(final ContainerID id) { + lock.readLock().lock(); + try { + return containerMap.containsKey(id); + } finally { + lock.readLock().unlock(); + } + } + /** * Removes a Container Entry from ContainerStateMap. * - * @param containerID - ContainerID - * @throws SCMException - throws if create failed. + * @param id - ContainerID */ - public void removeContainer(final ContainerID containerID) - throws ContainerNotFoundException { - Preconditions.checkNotNull(containerID, "ContainerID cannot be null"); + public void removeContainer(final ContainerID id) { + Preconditions.checkNotNull(id, "ContainerID cannot be null"); lock.writeLock().lock(); try { - checkIfContainerExist(containerID); - // Should we revert back to the original state if any of the below - // remove operation fails? - final ContainerInfo info = containerMap.remove(containerID); - lifeCycleStateMap.remove(info.getState(), containerID); - ownerMap.remove(info.getOwner(), containerID); - factorMap.remove(info.getReplicationFactor(), containerID); - typeMap.remove(info.getReplicationType(), containerID); - // Flush the cache of this container type. - flushCache(info); - LOG.trace("Removed container with {} successfully.", containerID); + if (contains(id)) { + // Should we revert back to the original state if any of the below + // remove operation fails? + final ContainerInfo info = containerMap.remove(id); + lifeCycleStateMap.remove(info.getState(), id); + ownerMap.remove(info.getOwner(), id); + factorMap.remove(info.getReplicationFactor(), id); + typeMap.remove(info.getReplicationType(), id); + // Flush the cache of this container type. + flushCache(info); + LOG.trace("Container {} removed from ContainerStateMap.", id); + } } finally { lock.writeLock().unlock(); } @@ -179,13 +181,11 @@ public void removeContainer(final ContainerID containerID) * Returns the latest state of Container from SCM's Container State Map. * * @param containerID - ContainerID - * @return container info, if found. + * @return container info, if found else null. */ - public ContainerInfo getContainerInfo(final ContainerID containerID) - throws ContainerNotFoundException { + public ContainerInfo getContainerInfo(final ContainerID containerID) { lock.readLock().lock(); try { - checkIfContainerExist(containerID); return containerMap.get(containerID); } finally { lock.readLock().unlock(); @@ -194,19 +194,18 @@ public ContainerInfo getContainerInfo(final ContainerID containerID) /** * Returns the latest list of DataNodes where replica for given containerId - * exist. Throws an SCMException if no entry is found for given containerId. + * exist. * * @param containerID * @return Set */ public Set getContainerReplicas( - final ContainerID containerID) throws ContainerNotFoundException { + final ContainerID containerID) { Preconditions.checkNotNull(containerID); lock.readLock().lock(); try { - checkIfContainerExist(containerID); - return Collections - .unmodifiableSet(replicaMap.get(containerID)); + final Set replicas = replicaMap.get(containerID); + return replicas == null ? null : Collections.unmodifiableSet(replicas); } finally { lock.readLock().unlock(); } @@ -221,14 +220,15 @@ public Set getContainerReplicas( * @param replica */ public void updateContainerReplica(final ContainerID containerID, - final ContainerReplica replica) throws ContainerNotFoundException { + final ContainerReplica replica) { Preconditions.checkNotNull(containerID); lock.writeLock().lock(); try { - checkIfContainerExist(containerID); - Set replicas = replicaMap.get(containerID); - replicas.remove(replica); - replicas.add(replica); + if (contains(containerID)) { + final Set replicas = replicaMap.get(containerID); + replicas.remove(replica); + replicas.add(replica); + } } finally { lock.writeLock().unlock(); } @@ -242,18 +242,13 @@ public void updateContainerReplica(final ContainerID containerID, * @return True of dataNode is removed successfully else false. */ public void removeContainerReplica(final ContainerID containerID, - final ContainerReplica replica) - throws ContainerNotFoundException, ContainerReplicaNotFoundException { + final ContainerReplica replica) { Preconditions.checkNotNull(containerID); Preconditions.checkNotNull(replica); - lock.writeLock().lock(); try { - checkIfContainerExist(containerID); - if(!replicaMap.get(containerID).remove(replica)) { - throw new ContainerReplicaNotFoundException( - "Container #" - + containerID.getId() + ", replica: " + replica); + if (contains(containerID)) { + replicaMap.get(containerID).remove(replica); } } finally { lock.writeLock().unlock(); @@ -264,15 +259,16 @@ public void removeContainerReplica(final ContainerID containerID, * Just update the container State. * @param info ContainerInfo. */ - public void updateContainerInfo(final ContainerInfo info) - throws ContainerNotFoundException { + public void updateContainerInfo(final ContainerInfo info) { + Preconditions.checkNotNull(info); + final ContainerID id = info.containerID(); lock.writeLock().lock(); try { - Preconditions.checkNotNull(info); - checkIfContainerExist(info.containerID()); - final ContainerInfo currentInfo = containerMap.get(info.containerID()); - flushCache(info, currentInfo); - containerMap.put(info.containerID(), info); + if (contains(id)) { + final ContainerInfo currentInfo = containerMap.get(id); + flushCache(info, currentInfo); + containerMap.put(id, info); + } } finally { lock.writeLock().unlock(); } @@ -287,12 +283,16 @@ public void updateContainerInfo(final ContainerInfo info) * @throws SCMException - in case of failure. */ public void updateState(ContainerID containerID, LifeCycleState currentState, - LifeCycleState newState) throws SCMException, ContainerNotFoundException { + LifeCycleState newState) throws SCMException { Preconditions.checkNotNull(currentState); Preconditions.checkNotNull(newState); lock.writeLock().lock(); try { - checkIfContainerExist(containerID); + if (!contains(containerID)) { + return; + } + + // TODO: Simplify this logic. final ContainerInfo currentInfo = containerMap.get(containerID); try { currentInfo.setState(newState); @@ -340,7 +340,12 @@ public void updateState(ContainerID containerID, LifeCycleState currentState, } public Set getAllContainerIDs() { - return Collections.unmodifiableSet(containerMap.keySet()); + lock.readLock().lock(); + try { + return Collections.unmodifiableSet(containerMap.keySet()); + } finally { + lock.readLock().unlock(); + } } /** @@ -535,13 +540,4 @@ private void flushCache(final ContainerInfo... containerInfos) { } } - // TODO: Move container not found exception to upper layer. - private void checkIfContainerExist(ContainerID containerID) - throws ContainerNotFoundException { - if (!containerMap.containsKey(containerID)) { - throw new ContainerNotFoundException("Container with id #" + - containerID.getId() + " not found."); - } - } - } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java index 87c9e9172698..cb02e3171803 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java @@ -38,11 +38,11 @@ public byte[] toPersistedFormat(ContainerID container) throws IOException { @Override public ContainerID fromPersistedFormat(byte[] rawData) throws IOException { - return new ContainerID(longCodec.fromPersistedFormat(rawData)); + return ContainerID.valueOf(longCodec.fromPersistedFormat(rawData)); } @Override public ContainerID copyObject(ContainerID object) { - return new ContainerID(object.getId()); + return ContainerID.valueOf(object.getId()); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index ede679d4d2a9..594527a22e9a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -203,7 +203,7 @@ public ContainerInfo getContainer(long containerID) throws IOException { getScm().checkAdminAccess(remoteUser); try { return scm.getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); } catch (IOException ex) { auditSuccess = false; AUDIT.logReadFailure( @@ -222,7 +222,7 @@ public ContainerInfo getContainer(long containerID) throws IOException { private ContainerWithPipeline getContainerWithPipelineCommon( long containerID) throws IOException { - final ContainerID cid = ContainerID.valueof(containerID); + final ContainerID cid = ContainerID.valueOf(containerID); final ContainerInfo container = scm.getContainerManager() .getContainer(cid); @@ -268,13 +268,13 @@ public ContainerWithPipeline getContainerWithPipeline(long containerID) AUDIT.logReadSuccess(buildAuditMessageForSuccess( SCMAction.GET_CONTAINER_WITH_PIPELINE, Collections.singletonMap("containerID", - ContainerID.valueof(containerID).toString()))); + ContainerID.valueOf(containerID).toString()))); return cp; } catch (IOException ex) { AUDIT.logReadFailure(buildAuditMessageForFailure( SCMAction.GET_CONTAINER_WITH_PIPELINE, Collections.singletonMap("containerID", - ContainerID.valueof(containerID).toString()), ex)); + ContainerID.valueOf(containerID).toString()), ex)); throw ex; } } @@ -291,13 +291,13 @@ public List getContainerWithPipelineBatch( try { ContainerWithPipeline cp = getContainerWithPipelineCommon(containerID); cpList.add(cp); - strContainerIDs.append(ContainerID.valueof(containerID).toString()); + strContainerIDs.append(ContainerID.valueOf(containerID).toString()); strContainerIDs.append(","); } catch (IOException ex) { AUDIT.logReadFailure(buildAuditMessageForFailure( SCMAction.GET_CONTAINER_WITH_PIPELINE_BATCH, Collections.singletonMap("containerID", - ContainerID.valueof(containerID).toString()), ex)); + ContainerID.valueOf(containerID).toString()), ex)); throw ex; } } @@ -337,7 +337,7 @@ public List listContainer(long startContainerID, // "null" is assigned, so that its handled in the // scm.getContainerManager().listContainer method final ContainerID containerId = startContainerID != 0 ? ContainerID - .valueof(startContainerID) : null; + .valueOf(startContainerID) : null; return scm.getContainerManager(). listContainer(containerId, count); } catch (Exception ex) { @@ -364,7 +364,7 @@ public void deleteContainer(long containerID) throws IOException { try { getScm().checkAdminAccess(remoteUser); scm.getContainerManager().deleteContainer( - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); } catch (Exception ex) { auditSuccess = false; AUDIT.logWriteFailure( @@ -407,7 +407,7 @@ public void closeContainer(long containerID) throws IOException { auditMap.put("remoteUser", remoteUser); try { scm.checkAdminAccess(remoteUser); - final ContainerID cid = ContainerID.valueof(containerID); + final ContainerID cid = ContainerID.valueOf(containerID); final HddsProtos.LifeCycleState state = scm.getContainerManager() .getContainer(cid).getState(); if (!state.equals(HddsProtos.LifeCycleState.OPEN)) { @@ -415,7 +415,7 @@ public void closeContainer(long containerID) throws IOException { ResultCodes.UNEXPECTED_CONTAINER_STATE); } scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER, - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); AUDIT.logWriteSuccess(buildAuditMessageForSuccess( SCMAction.CLOSE_CONTAINER, auditMap)); } catch (Exception ex) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index b17729bf62d0..4513857fdb81 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -774,7 +774,7 @@ private void unregisterMXBean() { @VisibleForTesting public ContainerInfo getContainerInfo(long containerID) throws IOException { - return containerManager.getContainer(ContainerID.valueof(containerID)); + return containerManager.getContainer(ContainerID.valueOf(containerID)); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index d4e25536bcab..96147c5616dc 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -410,13 +410,14 @@ private void mockContainerInfo(long containerID, DatanodeDetails dd) .build(); ContainerInfo.Builder builder = new ContainerInfo.Builder(); - builder.setPipelineID(pipeline.getId()) + builder.setContainerID(containerID) + .setPipelineID(pipeline.getId()) .setReplicationType(pipeline.getType()) .setReplicationFactor(pipeline.getFactor()); ContainerInfo containerInfo = builder.build(); Mockito.doReturn(containerInfo).when(containerManager) - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); final Set replicaSet = dns.stream() .map(datanodeDetails -> ContainerReplica.newBuilder() @@ -426,7 +427,7 @@ private void mockContainerInfo(long containerID, DatanodeDetails dd) .build()) .collect(Collectors.toSet()); when(containerManager.getContainerReplicas( - ContainerID.valueof(containerID))) + ContainerID.valueOf(containerID))) .thenReturn(replicaSet); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index daa97266d8e8..fbe4d42b0b85 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -126,7 +126,7 @@ public void testIfCloseContainerEventHadnlerInvoked() { GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(CloseContainerEventHandler.LOG); eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(Math.abs(RandomUtils.nextInt()))); + ContainerID.valueOf(Math.abs(RandomUtils.nextInt()))); eventQueue.processAll(1000); Assert.assertTrue(logCapturer.getOutput() .contains("Close container Event triggered for container")); @@ -138,7 +138,7 @@ public void testCloseContainerEventWithInvalidContainer() { GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer .captureLogs(CloseContainerEventHandler.LOG); eventQueue.fireEvent(CLOSE_CONTAINER, - new ContainerID(id)); + ContainerID.valueOf(id)); eventQueue.processAll(1000); Assert.assertTrue(logCapturer.getOutput() .contains("Failed to close the container")); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java index 3434825a2e09..09b51f07a97a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java @@ -61,7 +61,7 @@ public void testCloseContainerAction() { queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions); queue.processAll(1000L); verify(closeContainerEventHandler, times(1)) - .onMessage(ContainerID.valueof(1L), queue); + .onMessage(ContainerID.valueOf(1L), queue); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 022d3921df0b..6492e0ac614f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -79,13 +79,37 @@ public void cleanup() throws Exception { @Test public void testAllocateContainer() throws Exception { - Assert.assertTrue(containerManager.getContainerIDs().isEmpty()); + Assert.assertTrue( + containerManager.listContainers(null, Integer.MAX_VALUE).isEmpty()); final ContainerInfo container = containerManager.allocateContainer( HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, "admin"); - Assert.assertEquals(1, containerManager.getContainerIDs().size()); + Assert.assertEquals(1, + containerManager.listContainers(null, Integer.MAX_VALUE).size()); Assert.assertNotNull(containerManager.getContainer( container.containerID())); } -} \ No newline at end of file + @Test + public void testUpdateContainerState() throws Exception { + final ContainerInfo container = containerManager.allocateContainer( + HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, "admin"); + final ContainerID cid = container.containerID(); + Assert.assertEquals(HddsProtos.LifeCycleState.OPEN, + containerManager.getContainer(cid).getState()); + containerManager.updateContainerState(cid, + HddsProtos.LifeCycleEvent.FINALIZE); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING, + containerManager.getContainer(cid).getState()); + containerManager.updateContainerState(cid, + HddsProtos.LifeCycleEvent.QUASI_CLOSE); + Assert.assertEquals(HddsProtos.LifeCycleState.QUASI_CLOSED, + containerManager.getContainer(cid).getState()); + containerManager.updateContainerState(cid, + HddsProtos.LifeCycleEvent.FORCE_CLOSE); + Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED, + containerManager.getContainer(cid).getState()); + } + +} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index 25650762bc59..a45d63718c19 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -287,7 +287,7 @@ public void testGetContainerReplicaWithParallelUpdate() throws Exception { @Test public void testgetNoneExistentContainer() { try { - containerManager.getContainer(ContainerID.valueof( + containerManager.getContainer(ContainerID.valueOf( random.nextInt() & Integer.MAX_VALUE)); Assert.fail(); } catch (ContainerNotFoundException ex) { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java index 63cc9bfd7893..b7b89880891b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java @@ -39,7 +39,7 @@ public class TestContainerAttribute { @Test public void testInsert() throws SCMException { ContainerAttribute containerAttribute = new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); + ContainerID id = ContainerID.valueOf(42); containerAttribute.insert(1, id); Assert.assertEquals(1, containerAttribute.getCollection(1).size()); @@ -47,7 +47,7 @@ public void testInsert() throws SCMException { // Insert again and verify that it overwrites an existing value. ContainerID newId = - new ContainerID(42); + ContainerID.valueOf(42); containerAttribute.insert(1, newId); Assert.assertEquals(1, containerAttribute.getCollection(1).size()); @@ -59,7 +59,7 @@ public void testHasKey() throws SCMException { ContainerAttribute containerAttribute = new ContainerAttribute<>(); for (int x = 1; x < 42; x++) { - containerAttribute.insert(1, new ContainerID(x)); + containerAttribute.insert(1, ContainerID.valueOf(x)); } Assert.assertTrue(containerAttribute.hasKey(1)); for (int x = 1; x < 42; x++) { @@ -67,7 +67,7 @@ public void testHasKey() throws SCMException { } Assert.assertFalse(containerAttribute.hasContainerID(1, - new ContainerID(42))); + ContainerID.valueOf(42))); } @Test @@ -76,7 +76,7 @@ public void testClearSet() throws SCMException { ContainerAttribute containerAttribute = new ContainerAttribute<>(); for (String k : keyslist) { for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); + containerAttribute.insert(k, ContainerID.valueOf(x)); } } for (String k : keyslist) { @@ -96,16 +96,16 @@ public void testRemove() throws SCMException { for (String k : keyslist) { for (int x = 1; x < 101; x++) { - containerAttribute.insert(k, new ContainerID(x)); + containerAttribute.insert(k, ContainerID.valueOf(x)); } } for (int x = 1; x < 101; x += 2) { - containerAttribute.remove("Key1", new ContainerID(x)); + containerAttribute.remove("Key1", ContainerID.valueOf(x)); } for (int x = 1; x < 101; x += 2) { Assert.assertFalse(containerAttribute.hasContainerID("Key1", - new ContainerID(x))); + ContainerID.valueOf(x))); } Assert.assertEquals(100, @@ -125,7 +125,7 @@ public void tesUpdate() throws SCMException { String key3 = "Key3"; ContainerAttribute containerAttribute = new ContainerAttribute<>(); - ContainerID id = new ContainerID(42); + ContainerID id = ContainerID.valueOf(42); containerAttribute.insert(key1, id); Assert.assertTrue(containerAttribute.hasContainerID(key1, id)); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java index 50b962d01fc8..3d77e9d14028 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java @@ -193,19 +193,19 @@ public void testOnMessage() throws Exception { deadNodeHandler.onMessage(datanode1, publisher); Set container1Replicas = containerManager - .getContainerReplicas(new ContainerID(container1.getContainerID())); + .getContainerReplicas(ContainerID.valueOf(container1.getContainerID())); Assert.assertEquals(1, container1Replicas.size()); Assert.assertEquals(datanode2, container1Replicas.iterator().next().getDatanodeDetails()); Set container2Replicas = containerManager - .getContainerReplicas(new ContainerID(container2.getContainerID())); + .getContainerReplicas(ContainerID.valueOf(container2.getContainerID())); Assert.assertEquals(1, container2Replicas.size()); Assert.assertEquals(datanode2, container2Replicas.iterator().next().getDatanodeDetails()); Set container3Replicas = containerManager - .getContainerReplicas(new ContainerID(container3.getContainerID())); + .getContainerReplicas(container3.containerID()); Assert.assertEquals(1, container3Replicas.size()); Assert.assertEquals(datanode3, container3Replicas.iterator().next().getDatanodeDetails()); @@ -216,7 +216,7 @@ private void registerReplicas(ContainerManager contManager, throws ContainerNotFoundException { for (DatanodeDetails datanode : datanodes) { contManager.updateContainerReplica( - new ContainerID(container.getContainerID()), + ContainerID.valueOf(container.getContainerID()), ContainerReplica.newBuilder() .setContainerState(ContainerReplicaProto.State.OPEN) .setContainerID(container.containerID()) @@ -236,7 +236,7 @@ private void registerContainers(DatanodeDetails datanode, nodeManager .setContainers(datanode, Arrays.stream(containers) - .map(container -> new ContainerID(container.getContainerID())) + .map(ContainerInfo::containerID) .collect(Collectors.toSet())); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java index 77ed9075ae14..bc1b3dd125d5 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java @@ -52,7 +52,7 @@ private void generateData() { TreeSet currentSet = new TreeSet<>(); for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) { long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex; - currentSet.add(new ContainerID(currentCnIndex)); + currentSet.add(ContainerID.valueOf(currentCnIndex)); } testData.put(UUID.randomUUID(), currentSet); } @@ -206,7 +206,7 @@ public void testProcessReportDetectNewContainers() throws SCMException { TreeSet addedContainers = new TreeSet<>(); for (int x = 1; x <= newCount; x++) { long cTemp = last.getId() + x; - addedContainers.add(new ContainerID(cTemp)); + addedContainers.add(ContainerID.valueOf(cTemp)); } // This set is the super set of existing containers and new containers. @@ -250,7 +250,7 @@ public void testProcessReportDetectMissingContainers() throws SCMException { for (int x = 0; x < removeCount; x++) { int startBase = (int) first.getId(); long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); + removedContainers.add(ContainerID.valueOf(cTemp + startBase)); } // This set is a new set with some containers removed. @@ -282,7 +282,7 @@ public void testProcessReportDetectNewAndMissingContainers() throws Set insertedSet = new TreeSet<>(); // Insert nodes from 1..30 for (int x = 1; x <= 30; x++) { - insertedSet.add(new ContainerID(x)); + insertedSet.add(ContainerID.valueOf(x)); } @@ -296,7 +296,7 @@ public void testProcessReportDetectNewAndMissingContainers() throws for (int x = 0; x < removeCount; x++) { int startBase = (int) first.getId(); long cTemp = r.nextInt(values.size()); - removedContainers.add(new ContainerID(cTemp + startBase)); + removedContainers.add(ContainerID.valueOf(cTemp + startBase)); } Set newSet = new TreeSet<>(values); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index a8f03bb6ad68..642378f6786b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -162,7 +162,7 @@ public void testUpdatePipelineStates() throws Exception { PipelineID pipelineID = pipeline.getId(); pipelineManager.openPipeline(pipelineID); - pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueof(1)); + pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueOf(1)); Assert.assertTrue(pipelineManager .getPipelines(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, @@ -262,7 +262,7 @@ public void testRemovePipeline() throws Exception { // Open the pipeline pipelineManager.openPipeline(pipeline.getId()); pipelineManager - .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); + .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1)); Assert.assertTrue(pipelineManager .getPipelines(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java index 8252e2c9df25..43d5398a2513 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java @@ -290,14 +290,14 @@ public void testAddAndGetContainer() throws IOException { stateManager.addPipeline(pipeline); pipeline = stateManager.getPipeline(pipeline.getId()); stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); // move pipeline to open state stateManager.openPipeline(pipeline.getId()); stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); //verify the number of containers returned Set containerIDs = @@ -307,7 +307,7 @@ public void testAddAndGetContainer() throws IOException { removePipeline(pipeline); try { stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); Assert.fail("Container should not have been added"); } catch (IOException e) { // Can not add a container to removed pipeline @@ -322,7 +322,7 @@ public void testRemovePipeline() throws IOException { // close the pipeline stateManager.openPipeline(pipeline.getId()); stateManager - .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); + .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1)); try { stateManager.removePipeline(pipeline.getId()); @@ -347,26 +347,26 @@ public void testRemoveContainer() throws IOException { stateManager.openPipeline(pipeline.getId()); stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size()); stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size()); // add two containers in the pipeline stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); stateManager.addContainerToPipeline(pipeline.getId(), - ContainerID.valueof(++containerID)); + ContainerID.valueOf(++containerID)); Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size()); // move pipeline to closing state stateManager.finalizePipeline(pipeline.getId()); stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); stateManager.removeContainerFromPipeline(pipeline.getId(), - ContainerID.valueof(--containerID)); + ContainerID.valueOf(--containerID)); Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size()); // clean up diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java index 7f537369306f..9cc9b3e8008b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java @@ -183,7 +183,7 @@ public void testRemovePipeline() throws IOException { HddsProtos.ReplicationFactor.THREE); pipelineManager.openPipeline(pipeline.getId()); pipelineManager - .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1)); + .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1)); pipelineManager.closePipeline(pipeline, false); pipelineManager.close(); @@ -428,7 +428,7 @@ public void testActivateDeactivatePipeline() throws IOException { final PipelineID pid = pipeline.getId(); pipelineManager.openPipeline(pid); - pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1)); + pipelineManager.addContainerToPipeline(pid, ContainerID.valueOf(1)); Assert.assertTrue(pipelineManager .getPipelines(HddsProtos.ReplicationType.RATIS, diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java index 03cdb721d51d..adffbd81a50e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java @@ -324,7 +324,7 @@ private void handleException(BlockOutputStreamEntry streamEntry, // if the container needs to be excluded , add the container to the // exclusion list , otherwise add the pipeline to the exclusion list if (containerExclusionException) { - excludeList.addConatinerId(ContainerID.valueof(containerId)); + excludeList.addConatinerId(ContainerID.valueOf(containerId)); } else { excludeList.addPipeline(pipelineId); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java index 3842818a8526..70f41529f135 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java @@ -252,7 +252,7 @@ public void testGetMatchingContainerWithExcludedList() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline(), - new HashSet<>(Collections.singletonList(new ContainerID(1)))); + new HashSet<>(Collections.singletonList(ContainerID.valueOf(1)))); Assert.assertNotEquals(container1.getContainerInfo().getContainerID(), info.getContainerID()); } @@ -277,8 +277,8 @@ public void testCreateContainerLogicWithExcludedList() throws IOException { ContainerInfo info = containerManager .getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE, container1.getPipeline(), - new HashSet<>(Arrays.asList(new ContainerID(1), new - ContainerID(2), new ContainerID(3)))); + new HashSet<>(Arrays.asList(ContainerID.valueOf(1), + ContainerID.valueOf(2), ContainerID.valueOf(3)))); Assert.assertEquals(info.getContainerID(), 4); } @@ -418,7 +418,7 @@ public void testReplicaMap() throws Exception { .setUuid(UUID.randomUUID()).build(); // Test 1: no replica's exist - ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong()); + ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong()); Set replicaSet; try { containerStateManager.getContainerReplicas(containerID); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java index cbe84b6ad7a2..cc6824ea38d7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java @@ -113,7 +113,7 @@ public void testContainerOpsMetrics() throws IOException { "NumSuccessfulDeleteContainers", metrics); containerManager.deleteContainer( - new ContainerID(containerInfo.getContainerID())); + ContainerID.valueOf(containerInfo.getContainerID())); metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers", @@ -123,7 +123,7 @@ public void testContainerOpsMetrics() throws IOException { try { // Give random container to delete. containerManager.deleteContainer( - new ContainerID(RandomUtils.nextLong(10000, 20000))); + ContainerID.valueOf(RandomUtils.nextLong(10000, 20000))); fail("testContainerOpsMetrics failed"); } catch (IOException ex) { // Here it should fail, so it should have the old metric value. @@ -135,7 +135,7 @@ public void testContainerOpsMetrics() throws IOException { } containerManager.listContainer( - new ContainerID(containerInfo.getContainerID()), 1); + ContainerID.valueOf(containerInfo.getContainerID()), 1); metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName()); Assert.assertEquals(getLongCounter("NumListContainerOps", metrics), 1); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index dd543ed7841c..69615e88eb7f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -58,21 +58,21 @@ public static void closeContainers( StorageContainerManager scm) throws Exception { performOperationOnKeyContainers((blockID) -> { if (scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) + .getContainer(ContainerID.valueOf(blockID.getContainerID())) .getState() == HddsProtos.LifeCycleState.OPEN) { scm.getContainerManager() - .updateContainerState(ContainerID.valueof(blockID.getContainerID()), + .updateContainerState(ContainerID.valueOf(blockID.getContainerID()), HddsProtos.LifeCycleEvent.FINALIZE); } if (scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) + .getContainer(ContainerID.valueOf(blockID.getContainerID())) .getState() == HddsProtos.LifeCycleState.CLOSING) { scm.getContainerManager() - .updateContainerState(ContainerID.valueof(blockID.getContainerID()), + .updateContainerState(ContainerID.valueOf(blockID.getContainerID()), HddsProtos.LifeCycleEvent.CLOSE); } Assert.assertFalse(scm.getContainerManager() - .getContainer(ContainerID.valueof(blockID.getContainerID())) + .getContainer(ContainerID.valueOf(blockID.getContainerID())) .isOpen()); }, omKeyLocationInfoGroups); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java index d9f75788ec8b..9fc8927b5357 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java @@ -160,7 +160,7 @@ public void testContainerReplication() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); PipelineID pipelineID = cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getPipelineID(); + .getContainer(ContainerID.valueOf(containerID)).getPipelineID(); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(pipelineID); @@ -168,13 +168,13 @@ public void testContainerReplication() throws Exception { HddsProtos.LifeCycleState containerState = cluster.getStorageContainerManager().getContainerManager() - .getContainer(new ContainerID(containerID)).getState(); + .getContainer(ContainerID.valueOf(containerID)).getState(); LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info( "Current Container State is {}", containerState); if ((containerState != HddsProtos.LifeCycleState.CLOSING) && (containerState != HddsProtos.LifeCycleState.CLOSED)) { cluster.getStorageContainerManager().getContainerManager() - .updateContainerState(new ContainerID(containerID), + .updateContainerState(ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.FINALIZE); } // wait for container to move to OPEN state in SCM diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java index a9c0706e04ac..2de63d5ecf9d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java @@ -177,7 +177,7 @@ public void testBlockWritesWithDnFailures() throws Exception { long containerId = locationInfoList.get(0).getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -217,7 +217,7 @@ public void testWriteSmallFile() throws Exception { BlockID blockId = locationInfoList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -280,7 +280,7 @@ public void testContainerExclusionWithClosedContainerException() key.flush(); Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds() - .contains(ContainerID.valueof(containerId))); + .contains(ContainerID.valueOf(containerId))); Assert.assertTrue( keyOutputStream.getExcludeList().getDatanodes().isEmpty()); Assert.assertTrue( @@ -328,7 +328,7 @@ public void testDatanodeExclusionWithMajorityCommit() throws Exception { BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -391,7 +391,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java index 76027f7e295a..57158bbe8671 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java @@ -183,7 +183,7 @@ public void testPipelineExclusionWithPipelineFailure() throws Exception { BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java index b435ce98057f..2a97dab91c31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java @@ -159,7 +159,7 @@ public void testMultiBlockWritesWithDnFailures() throws Exception { long containerId = locationInfoList.get(1).getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -207,7 +207,7 @@ public void testMultiBlockWritesWithIntermittentDnFailures() BlockID blockId = streamEntryList.get(0).getBlockID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerId)); + .getContainer(ContainerID.valueOf(containerId)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java index dd871f339056..76861d45bf0d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java @@ -155,7 +155,7 @@ public void testGroupMismatchExceptionHandling() throws Exception { Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -201,7 +201,7 @@ public void testMaxRetriesByOzoneClient() throws Exception { containerID = entry.getBlockID().getContainerID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java index e202ca18afd5..a96cbe653408 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java @@ -147,7 +147,7 @@ public void testGroupMismatchExceptionHandling() throws Exception { Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index b7b75a4f8403..24b8620ef5e4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -1542,7 +1542,7 @@ public void testGetKeyDetails() throws IOException { // Second, sum the data size from chunks in Container via containerID // and localID, make sure the size equals to the size from keyDetails. ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java index 914845931df5..5e8e5ccb31dc 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java @@ -169,7 +169,7 @@ public void testPutKeyAndGetKeyThreeNodes() .assertEquals(value.getBytes().length, keyLocations.get(0).getLength()); ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java index fab2ea387d34..21bbc04087e3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java @@ -174,7 +174,7 @@ public static void waitForPipelineClose(MiniOzoneCluster cluster, for (long containerID : containerIdList) { ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -250,7 +250,7 @@ public static void waitForContainerClose(MiniOzoneCluster cluster, for (long containerID : containerIdList) { ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); @@ -271,7 +271,7 @@ public static void waitForContainerClose(MiniOzoneCluster cluster, // send the order to close the container cluster.getStorageContainerManager().getEventQueue() .fireEvent(SCMEvents.CLOSE_CONTAINER, - ContainerID.valueof(containerID)); + ContainerID.valueOf(containerID)); } } int index = 0; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java index 6b40179d1ef8..853f2cd71a71 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java @@ -122,7 +122,7 @@ public void testIfCloseContainerCommandHandlerIsInvoked() throws Exception { long containerID = omKeyLocationInfo.getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); @@ -179,7 +179,7 @@ public void testCloseContainerViaStandAlone() long containerID = omKeyLocationInfo.getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); @@ -232,7 +232,7 @@ public void testCloseContainerViaRatis() throws IOException, long containerID = omKeyLocationInfo.getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); @@ -295,7 +295,7 @@ public void testQuasiCloseTransitionViaRatis() long containerID = omKeyLocationInfo.getContainerID(); ContainerInfo container = cluster.getStorageContainerManager() - .getContainerManager().getContainer(ContainerID.valueof(containerID)); + .getContainerManager().getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager() .getPipelineManager().getPipeline(container.getPipelineID()); List datanodes = pipeline.getNodes(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java index 831c7291536c..8bd054bfe51a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java @@ -107,7 +107,7 @@ public void test() throws Exception { cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - ContainerID containerId = ContainerID.valueof( + ContainerID containerId = ContainerID.valueOf( omKeyLocationInfo.getContainerID()); ContainerInfo container = cluster.getStorageContainerManager() .getContainerManager().getContainer(containerId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 28b58d9da027..61c33696c865 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -252,7 +252,7 @@ private ContainerID getContainerID(String keyName) throws IOException { cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); - return ContainerID.valueof( + return ContainerID.valueOf( omKeyLocationInfo.getContainerID()); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java index 631d9448ce00..fbdee7e5eabb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java @@ -182,7 +182,7 @@ public void testOpenContainerIntegrity() throws Exception { ContainerManager cm = cluster.getStorageContainerManager() .getContainerManager(); Set replicas = cm.getContainerReplicas( - ContainerID.valueof(c.getContainerData().getContainerID())); + ContainerID.valueOf(c.getContainerData().getContainerID())); Assert.assertEquals(1, replicas.size()); ContainerReplica r = replicas.iterator().next(); Assert.assertEquals(StorageContainerDatanodeProtocolProtos. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java index 7f049a3f6585..1a4dddce9a90 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java @@ -128,7 +128,7 @@ public void testContainerReportKeyWrite() throws Exception { ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID()); Set replicas = scm.getContainerManager().getContainerReplicas( - new ContainerID(keyInfo.getContainerID())); + ContainerID.valueOf(keyInfo.getContainerID())); Assert.assertTrue(replicas.size() == 1); replicas.stream().forEach(rp -> Assert.assertTrue(rp.getDatanodeDetails().getParent() != null)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java index 9092cc5d42f1..ecb2a46de0b1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java @@ -202,6 +202,6 @@ public void testReconRestart() throws Exception { LambdaTestUtils.await(90000, 5000, () -> (newReconScm.getContainerManager() - .exists(ContainerID.valueof(containerID)))); + .exists(ContainerID.valueOf(containerID)))); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java index 394c102106e4..3afe48396516 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java @@ -159,16 +159,18 @@ public void testSCMContainerStateCount() throws Exception { if (i % 2 == 0) { containerID = containerInfoList.get(i).getContainerID(); scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); - assertEquals(scmContainerManager.getContainer(new ContainerID( + ContainerID.valueOf(containerID), + HddsProtos.LifeCycleEvent.FINALIZE); + assertEquals(scmContainerManager.getContainer(ContainerID.valueOf( containerID)).getState(), HddsProtos.LifeCycleState.CLOSING); } else { containerID = containerInfoList.get(i).getContainerID(); scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE); + ContainerID.valueOf(containerID), + HddsProtos.LifeCycleEvent.FINALIZE); scmContainerManager.updateContainerState( - new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE); - assertEquals(scmContainerManager.getContainer(new ContainerID( + ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.CLOSE); + assertEquals(scmContainerManager.getContainer(ContainerID.valueOf( containerID)).getState(), HddsProtos.LifeCycleState.CLOSED); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java index 1778b846d49f..10522cb7a48b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java @@ -246,7 +246,7 @@ public Response getMissingContainers() { long containerID = container.getContainerId(); try { ContainerInfo containerInfo = - containerManager.getContainer(new ContainerID(containerID)); + containerManager.getContainer(ContainerID.valueOf(containerID)); long keyCount = containerInfo.getNumberOfKeys(); UUID pipelineID = containerInfo.getPipelineID().getId(); @@ -307,7 +307,7 @@ public Response getUnhealthyContainers( for (UnhealthyContainers c : containers) { long containerID = c.getContainerId(); ContainerInfo containerInfo = - containerManager.getContainer(new ContainerID(containerID)); + containerManager.getContainer(ContainerID.valueOf(containerID)); long keyCount = containerInfo.getNumberOfKeys(); UUID pipelineID = containerInfo.getPipelineID().getId(); List datanodes = diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 315dd5c4e34c..f005509a9215 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -97,7 +97,7 @@ public synchronized void run() { private ContainerHealthStatus setCurrentContainer(long recordId) throws ContainerNotFoundException { ContainerInfo container = - containerManager.getContainer(new ContainerID(recordId)); + containerManager.getContainer(ContainerID.valueOf(recordId)); Set replicas = containerManager.getContainerReplicas(container.containerID()); return new ContainerHealthStatus(container, replicas, placementPolicy); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java index dff4709f56b1..c32ce05578b2 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java @@ -162,7 +162,7 @@ containerInfo, getPipelineManager(), containerInfo.containerID(), ex); getPipelineManager().removeContainerFromPipeline( containerInfo.getPipelineID(), - new ContainerID(containerInfo.getContainerID())); + ContainerID.valueOf(containerInfo.getContainerID())); throw ex; } finally { getLock().unlock(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java index 228a65793099..391d2c55d471 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java @@ -56,7 +56,7 @@ public void onMessage(final ContainerReportFromDatanode reportFromDatanode, List reportsList = containerReport.getReportsList(); for (ContainerReplicaProto containerReplicaProto : reportsList) { - final ContainerID id = ContainerID.valueof( + final ContainerID id = ContainerID.valueOf( containerReplicaProto.getContainerID()); try { containerManager.checkAndAddNewContainer(id, diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java index 0262c8bc0447..863ef4674fbe 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java @@ -70,7 +70,7 @@ public void onMessage(final IncrementalContainerReportFromDatanode report, for (ContainerReplicaProto replicaProto : report.getReport().getReportList()) { try { - final ContainerID id = ContainerID.valueof( + final ContainerID id = ContainerID.valueOf( replicaProto.getContainerID()); try { containerManager.checkAndAddNewContainer(id, replicaProto.getState(), diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java index 6ba6f5618670..514f919fb6e7 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java @@ -98,7 +98,7 @@ public class TestContainerEndpoint { private boolean isSetupDone = false; private ContainerSchemaManager containerSchemaManager; private ReconOMMetadataManager reconOMMetadataManager; - private ContainerID containerID = new ContainerID(1L); + private ContainerID containerID = ContainerID.valueOf(1L); private PipelineID pipelineID; private long keyCount = 5L; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java index 0a3546a6878c..0bfa1790ef84 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java @@ -52,7 +52,7 @@ public void setup() { container = mock(ContainerInfo.class); when(container.getReplicationFactor()) .thenReturn(HddsProtos.ReplicationFactor.THREE); - when(container.containerID()).thenReturn(new ContainerID(123456)); + when(container.containerID()).thenReturn(ContainerID.valueOf(123456)); when(container.getContainerID()).thenReturn((long)123456); when(placementPolicy.validateContainerPlacement( Mockito.anyList(), Mockito.anyInt())) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index d97b143dc380..890c242f3913 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -89,19 +89,19 @@ public void testRun() throws Exception { when(containerManagerMock.getContainer(c.containerID())).thenReturn(c); } // Under replicated - when(containerManagerMock.getContainerReplicas(new ContainerID(1L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) .thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY)); // return one UNHEALTHY replica for container ID 2 -> Missing - when(containerManagerMock.getContainerReplicas(new ContainerID(2L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) .thenReturn(getMockReplicas(2L, State.UNHEALTHY)); // return 0 replicas for container ID 3 -> Missing - when(containerManagerMock.getContainerReplicas(new ContainerID(3L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); // Return 5 Healthy -> Over replicated - when(containerManagerMock.getContainerReplicas(new ContainerID(4L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L))) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); @@ -110,11 +110,11 @@ public void testRun() throws Exception { State.CLOSED, State.CLOSED, State.CLOSED); placementMock.setMisRepWhenDnPresent( misReplicas.iterator().next().getDatanodeDetails().getUuid()); - when(containerManagerMock.getContainerReplicas(new ContainerID(5L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L))) .thenReturn(misReplicas); // Return 3 Healthy -> Healthy container - when(containerManagerMock.getContainerReplicas(new ContainerID(6L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L))) .thenReturn(getMockReplicas(6L, State.CLOSED, State.CLOSED, State.CLOSED)); @@ -164,20 +164,20 @@ public void testRun() throws Exception { // Now run the job again, to check that relevant records are updated or // removed as appropriate. Need to adjust the return value for all the mocks // Under replicated -> Delta goes from 2 to 1 - when(containerManagerMock.getContainerReplicas(new ContainerID(1L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L))) .thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED)); // ID 2 was missing - make it healthy now - when(containerManagerMock.getContainerReplicas(new ContainerID(2L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L))) .thenReturn(getMockReplicas(2L, State.CLOSED, State.CLOSED, State.CLOSED)); // return 0 replicas for container ID 3 -> Still Missing - when(containerManagerMock.getContainerReplicas(new ContainerID(3L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L))) .thenReturn(Collections.emptySet()); // Return 4 Healthy -> Delta changes from -2 to -1 - when(containerManagerMock.getContainerReplicas(new ContainerID(4L))) + when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L))) .thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED, State.CLOSED, State.CLOSED)); @@ -215,7 +215,7 @@ private Set getMockReplicas( replicas.add(ContainerReplica.newBuilder() .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails()) .setContainerState(s) - .setContainerID(new ContainerID(containerId)) + .setContainerID(ContainerID.valueOf(containerId)) .setSequenceId(1) .build()); } @@ -229,7 +229,7 @@ private List getMockContainers(int num) { when(c.getContainerID()).thenReturn((long)i); when(c.getReplicationFactor()) .thenReturn(HddsProtos.ReplicationFactor.THREE); - when(c.containerID()).thenReturn(new ContainerID(i)); + when(c.containerID()).thenReturn(ContainerID.valueOf(i)); containers.add(c); } return containers; diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java index 62baf1298ff7..ccc9de381910 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java @@ -59,7 +59,7 @@ public void setup() { container = mock(ContainerInfo.class); when(container.getReplicationFactor()) .thenReturn(HddsProtos.ReplicationFactor.THREE); - when(container.containerID()).thenReturn(new ContainerID(123456)); + when(container.containerID()).thenReturn(ContainerID.valueOf(123456)); when(container.getContainerID()).thenReturn((long)123456); when(placementPolicy.validateContainerPlacement( Mockito.anyList(), Mockito.anyInt())) diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java index 783f42ca3929..a5ee0a251001 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java @@ -113,7 +113,7 @@ private StorageContainerServiceProvider getScmServiceProvider() Pipeline pipeline = getRandomPipeline(); getPipelineManager().addPipeline(pipeline); - ContainerID containerID = new ContainerID(100L); + ContainerID containerID = ContainerID.valueOf(100L); ContainerInfo containerInfo = new ContainerInfo.Builder() .setContainerID(containerID.getId()) @@ -140,7 +140,7 @@ protected Table getContainerTable() protected ContainerWithPipeline getTestContainer(LifeCycleState state) throws IOException { - ContainerID containerID = new ContainerID(100L); + ContainerID containerID = ContainerID.valueOf(100L); Pipeline pipeline = getRandomPipeline(); pipelineManager.addPipeline(pipeline); ContainerInfo containerInfo = @@ -159,7 +159,7 @@ protected ContainerWithPipeline getTestContainer(LifeCycleState state) protected ContainerWithPipeline getTestContainer(long id, LifeCycleState state) throws IOException { - ContainerID containerID = new ContainerID(id); + ContainerID containerID = ContainerID.valueOf(id); Pipeline pipeline = getRandomPipeline(); pipelineManager.addPipeline(pipeline); ContainerInfo containerInfo = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java index 9f47779e3b33..49a5f397cc83 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java @@ -100,7 +100,7 @@ public void testAddNewClosedContainer() throws IOException { @Test public void testCheckAndAddNewContainer() throws IOException { - ContainerID containerID = new ContainerID(100L); + ContainerID containerID = ContainerID.valueOf(100L); ReconContainerManager containerManager = getContainerManager(); assertFalse(containerManager.exists(containerID)); DatanodeDetails datanodeDetails = randomDatanodeDetails(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java index 1b42f21712de..97eaf96369a0 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java @@ -62,7 +62,7 @@ public class TestReconIncrementalContainerReportHandler @Test public void testProcessICR() throws IOException, NodeNotFoundException { - ContainerID containerID = new ContainerID(100L); + ContainerID containerID = ContainerID.valueOf(100L); DatanodeDetails datanodeDetails = randomDatanodeDetails(); IncrementalContainerReportFromDatanode reportMock = mock(IncrementalContainerReportFromDatanode.class); From 5f3981c40484f739ac6d258d95446ac2b2cdd36a Mon Sep 17 00:00:00 2001 From: Rui Wang Date: Sat, 24 Oct 2020 21:16:24 +0530 Subject: [PATCH 21/51] HDDS-4115. CLI command to show current SCM leader and follower status. --- .../org/apache/hadoop/hdds/scm/ScmInfo.java | 32 +++++++- .../hadoop/hdds/scm/client/ScmClient.java | 5 +- ...ocationProtocolClientSideTranslatorPB.java | 4 +- .../src/main/proto/hdds.proto | 1 + .../hadoop/hdds/scm/ha/SCMHAManager.java | 6 ++ .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 11 +++ ...ocationProtocolServerSideTranslatorPB.java | 2 +- .../scm/server/SCMClientProtocolServer.java | 3 +- .../scm/server/StorageContainerManager.java | 4 + .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 9 +++ .../scm/cli/ContainerOperationClient.java | 4 + .../src/main/smoketest/admincli/scmha.robot | 28 +++++++ .../hadoop/ozone/shell/TestScmAdminHA.java | 79 +++++++++++++++++++ .../admin/scm/GetScmRatisRolesSubcommand.java | 46 +++++++++++ .../hadoop/ozone/admin/scm/ScmAdmin.java | 60 ++++++++++++++ .../hadoop/ozone/admin/scm/package-info.java | 22 ++++++ 16 files changed, 310 insertions(+), 6 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java index 6236febb7b12..b9d823e8d817 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java @@ -18,6 +18,9 @@ package org.apache.hadoop.hdds.scm; +import java.util.ArrayList; +import java.util.List; + /** * ScmInfo wraps the result returned from SCM#getScmInfo which * contains clusterId and the SCM Id. @@ -25,6 +28,7 @@ public final class ScmInfo { private String clusterId; private String scmId; + private List peerRoles; /** * Builder for ScmInfo. @@ -32,6 +36,11 @@ public final class ScmInfo { public static class Builder { private String clusterId; private String scmId; + private List peerRoles; + + public Builder() { + peerRoles = new ArrayList<>(); + } /** * sets the cluster id. @@ -53,14 +62,25 @@ public Builder setScmId(String id) { return this; } + /** + * Set peer address in Scm HA. + * @param roles ratis peer address in the format of [ip|hostname]:port + * @return Builder for scmInfo + */ + public Builder setRatisPeerRoles(List roles) { + peerRoles.addAll(roles); + return this; + } + public ScmInfo build() { - return new ScmInfo(clusterId, scmId); + return new ScmInfo(clusterId, scmId, peerRoles); } } - private ScmInfo(String clusterId, String scmId) { + private ScmInfo(String clusterId, String scmId, List peerRoles) { this.clusterId = clusterId; this.scmId = scmId; + this.peerRoles = peerRoles; } /** @@ -78,4 +98,12 @@ public String getClusterId() { public String getScmId() { return scmId; } + + /** + * Gets the list of peer roles (currently address) in Scm HA. + * @return List of peer address + */ + public List getRatisPeerRoles() { + return peerRoles; + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java index e4369fa86272..7c3c94cb7ae1 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java @@ -246,5 +246,8 @@ Map> getSafeModeRuleStatuses() */ boolean getReplicationManagerStatus() throws IOException; - + /** + * returns the list of ratis peer roles. Currently only include peer address. + */ + List getScmRatisRoles() throws IOException; } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index 0733940deb2c..cf888697313b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -444,7 +444,9 @@ public ScmInfo getScmInfo() throws IOException { .getGetScmInfoResponse(); ScmInfo.Builder builder = new ScmInfo.Builder() .setClusterId(resp.getClusterId()) - .setScmId(resp.getScmId()); + .setScmId(resp.getScmId()) + .setRatisPeerRoles(resp.getPeerRolesList()); + return builder.build(); } diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto index d89e7b4c2140..f0c9b37a6758 100644 --- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto +++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto @@ -211,6 +211,7 @@ message GetScmInfoRequestProto { message GetScmInfoResponseProto { required string clusterId = 1; required string scmId = 2; + repeated string peerRoles = 3; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index ade0ad965768..dc68b413536a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.scm.ha; +import java.util.List; import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftPeer; @@ -52,6 +53,11 @@ public interface SCMHAManager { */ void shutdown() throws IOException; + /** + * Returns roles of ratis peers. + */ + List getRatisRoles(); + /** * Returns NotLeaderException with useful info. */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 8bb94578b3b1..e2aa04f2f4e8 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdds.scm.ha; import com.google.common.base.Preconditions; +import java.util.List; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftGroupMemberId; @@ -145,6 +147,15 @@ public void shutdown() throws IOException { ratisServer.stop(); } + @Override + public List getRatisRoles() { + return getRatisServer() + .getRaftPeers() + .stream() + .map(peer -> peer.getAddress() == null ? "" : peer.getAddress()) + .collect(Collectors.toList()); + } + /** * {@inheritDoc} */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index d5496b420e58..24f17f124c44 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -440,8 +440,8 @@ public HddsProtos.GetScmInfoResponseProto getScmInfo( return HddsProtos.GetScmInfoResponseProto.newBuilder() .setClusterId(scmInfo.getClusterId()) .setScmId(scmInfo.getScmId()) + .addAllPeerRoles(scmInfo.getRatisPeerRoles()) .build(); - } public InSafeModeResponseProto inSafeMode( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 594527a22e9a..3ad31d7913b3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -488,7 +488,8 @@ public ScmInfo getScmInfo() throws IOException { ScmInfo.Builder builder = new ScmInfo.Builder() .setClusterId(scm.getScmStorageConfig().getClusterID()) - .setScmId(scm.getScmStorageConfig().getScmId()); + .setScmId(scm.getScmStorageConfig().getScmId()) + .setRatisPeerRoles(scm.getScmHAManager().getRatisRoles()); return builder.build(); } catch (Exception ex) { auditSuccess = false; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 4513857fdb81..3d1ad7286379 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1184,4 +1184,8 @@ public String getScmId() { public String getClusterId() { return getScmStorageConfig().getClusterID(); } + + public SCMHAManager getScmHAManager() { + return scmHAManager; + } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index ce48c1136a9f..e31e7e16bc5e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -20,6 +20,7 @@ import java.io.IOException; import java.lang.reflect.InvocationTargetException; import java.util.ArrayList; +import java.util.Arrays; import java.util.EnumMap; import java.util.List; import java.util.Map; @@ -107,6 +108,14 @@ public void shutdown() throws IOException { ratisServer.stop(); } + @Override + public List getRatisRoles() { + return Arrays.asList( + "180.3.14.5:9865", + "180.3.14.21:9865", + "180.3.14.145:9865"); + } + /** * {@inheritDoc} */ diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 96cd5307491d..038364227f1a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -508,4 +508,8 @@ public boolean getReplicationManagerStatus() throws IOException { return storageContainerLocationClient.getReplicationManagerStatus(); } + @Override + public List getScmRatisRoles() throws IOException { + return storageContainerLocationClient.getScmInfo().getRatisPeerRoles(); + } } diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot new file mode 100644 index 000000000000..31a990f857d7 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot @@ -0,0 +1,28 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +*** Settings *** +Documentation Smoketest ozone cluster startup +Library OperatingSystem +Library BuiltIn +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Variables *** + +*** Test Cases *** +Run scm roles + ${output} = Execute ozone admin scm roles + Should contain ${output} [] diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java new file mode 100644 index 000000000000..63a8e7186c79 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.shell; + +import java.net.InetSocketAddress; +import java.util.UUID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.admin.OzoneAdmin; +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +/** + * This class tests ozone admin scm commands. + */ +public class TestScmAdminHA { + private static OzoneAdmin ozoneAdmin; + private static OzoneConfiguration conf; + private static String omServiceId; + private static int numOfOMs; + private static String clusterId; + private static String scmId; + private static MiniOzoneCluster cluster; + + @BeforeClass + public static void init() throws Exception { + ozoneAdmin = new OzoneAdmin(); + conf = new OzoneConfiguration(); + + // Init HA cluster + omServiceId = "om-service-test1"; + numOfOMs = 3; + clusterId = UUID.randomUUID().toString(); + scmId = UUID.randomUUID().toString(); + cluster = MiniOzoneCluster.newHABuilder(conf) + .setClusterId(clusterId) + .setScmId(scmId) + .setOMServiceId(omServiceId) + .setNumOfOzoneManagers(numOfOMs) + .build(); + conf.setQuietMode(false); + // enable ratis for Scm. + conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true); + cluster.waitForClusterToBeReady(); + } + + @AfterClass + public static void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @Test + public void testGetRatisRoles() { + InetSocketAddress address = + cluster.getStorageContainerManager().getClientRpcAddress(); + String hostPort = address.getHostName() + ":" + address.getPort(); + String[] args = {"--scm", hostPort, "scm", "roles"}; + ozoneAdmin.execute(args); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java new file mode 100644 index 000000000000..cf2310c3cd5c --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java @@ -0,0 +1,46 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.admin.scm; + +import java.util.List; +import java.util.concurrent.Callable; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import picocli.CommandLine; + +/** + * Handler of scm status command. + */ +@CommandLine.Command( + name = "roles", + description = "List all SCMs and their respective Ratis server roles", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class GetScmRatisRolesSubcommand implements Callable { + + @CommandLine.ParentCommand + private ScmAdmin parent; + + @Override + public Void call() throws Exception { + ScmClient scmClient = parent.createScmClient(); + List roles = scmClient.getScmRatisRoles(); + System.out.println(roles); + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java new file mode 100644 index 000000000000..2605a6d3b950 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java @@ -0,0 +1,60 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.admin.scm; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.client.ScmClient; +import org.apache.hadoop.ozone.admin.OzoneAdmin; +import picocli.CommandLine; +import picocli.CommandLine.Model.CommandSpec; +import picocli.CommandLine.Spec; + +/** + * Subcommand for admin operations related to SCM. + */ +@CommandLine.Command( + name = "scm", + description = "Ozone Storage Container Manager specific admin operations", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class, + subcommands = { + GetScmRatisRolesSubcommand.class + }) +public class ScmAdmin extends GenericCli { + + @CommandLine.ParentCommand + private OzoneAdmin parent; + + @Spec + private CommandSpec spec; + + public OzoneAdmin getParent() { + return parent; + } + + @Override + public Void call() throws Exception { + GenericCli.missingSubcommand(spec); + return null; + } + + public ScmClient createScmClient() { + return parent.createScmClient(); + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java new file mode 100644 index 000000000000..ec15a3320069 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * SCM related Admin tools. + */ +package org.apache.hadoop.ozone.admin.scm; From 9f7ab4680117d0f0b5251027c282ae819eaf921c Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Sat, 24 Oct 2020 21:27:49 +0530 Subject: [PATCH 22/51] HDDS-3188. Add failover proxy for SCM block location. --- .../hdds/scm/exceptions/SCMException.java | 3 +- ...ocationProtocolClientSideTranslatorPB.java | 25 +- ...SCMBlockLocationFailoverProxyProvider.java | 280 ++++++++++++++++++ .../hdds/scm/proxy/SCMClientConfig.java | 103 +++++++ .../hadoop/hdds/scm/proxy/SCMProxyInfo.java | 73 +++++ .../hadoop/hdds/scm/proxy/package-info.java | 22 ++ .../src/main/proto/ScmServerProtocol.proto | 3 + ...ocationProtocolServerSideTranslatorPB.java | 18 ++ .../scm/server/SCMBlockProtocolServer.java | 4 + .../scm/server/StorageContainerManager.java | 19 ++ .../apache/hadoop/ozone/om/OzoneManager.java | 11 +- 11 files changed, 545 insertions(+), 16 deletions(-) create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java index 48a8e059d97b..82e3034454c2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java @@ -124,6 +124,7 @@ public enum ResultCodes { FAILED_TO_ALLOCATE_ENOUGH_BLOCKS, INTERNAL_ERROR, FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY, - FAILED_TO_INIT_LEADER_CHOOSE_POLICY + FAILED_TO_INIT_LEADER_CHOOSE_POLICY, + SCM_NOT_LEADER } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java index e86ee81ddb86..12c51f6ca49b 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type; @@ -45,10 +46,11 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -73,15 +75,21 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB private static final RpcController NULL_RPC_CONTROLLER = null; private final ScmBlockLocationProtocolPB rpcProxy; + private SCMBlockLocationFailoverProxyProvider failoverProxyProvider; /** * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy + * @param proxyProvider {@link SCMBlockLocationFailoverProxyProvider} + * failover proxy provider. */ public ScmBlockLocationProtocolClientSideTranslatorPB( - ScmBlockLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; + SCMBlockLocationFailoverProxyProvider proxyProvider) { + Preconditions.checkState(proxyProvider != null); + this.failoverProxyProvider = proxyProvider; + this.rpcProxy = (ScmBlockLocationProtocolPB) RetryProxy.create( + ScmBlockLocationProtocolPB.class, failoverProxyProvider, + failoverProxyProvider.getSCMBlockLocationRetryPolicy(null)); } /** @@ -105,6 +113,11 @@ private SCMBlockLocationResponse submitRequest( try { SCMBlockLocationResponse response = rpcProxy.send(NULL_RPC_CONTROLLER, req); + if (response.getStatus() == + ScmBlockLocationProtocolProtos.Status.SCM_NOT_LEADER) { + failoverProxyProvider + .performFailoverToAssignedLeader(response.getLeaderSCMNodeId()); + } return response; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); @@ -267,7 +280,7 @@ public Object getUnderlyingProxyObject() { } @Override - public void close() { - RPC.stopProxy(rpcProxy); + public void close() throws IOException { + failoverProxyProvider.close(); } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java new file mode 100644 index 000000000000..1beb69ea28c7 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.proxy; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; +import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.io.retry.RetryPolicy.RetryAction; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; +import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getHostName; + +/** + * Failover proxy provider for SCM. + */ +public class SCMBlockLocationFailoverProxyProvider implements + FailoverProxyProvider, Closeable { + public static final Logger LOG = + LoggerFactory.getLogger(SCMBlockLocationFailoverProxyProvider.class); + + private Map> scmProxies; + private Map scmProxyInfoMap; + private List scmNodeIDList; + + private String currentProxySCMNodeId; + private int currentProxyIndex; + + private final ConfigurationSource conf; + private final long scmVersion; + + private final String scmServiceId; + + private String lastAttemptedLeader; + + private final int maxRetryCount; + private final long retryInterval; + + public static final String SCM_DUMMY_NODEID_PREFIX = "scm"; + + public SCMBlockLocationFailoverProxyProvider(ConfigurationSource conf) { + this.conf = conf; + this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocol.class); + this.scmServiceId = conf.getTrimmed(OZONE_SCM_SERVICE_IDS_KEY); + this.scmProxies = new HashMap<>(); + this.scmProxyInfoMap = new HashMap<>(); + this.scmNodeIDList = new ArrayList<>(); + loadConfigs(); + + + this.currentProxyIndex = 0; + currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex); + + SCMClientConfig config = conf.getObject(SCMClientConfig.class); + this.maxRetryCount = config.getRetryCount(); + this.retryInterval = config.getRetryInterval(); + } + + @VisibleForTesting + protected Collection getSCMAddressList() { + Collection scmAddressList = + conf.getTrimmedStringCollection(OZONE_SCM_NAMES); + Collection resultList = new ArrayList<>(); + if (!scmAddressList.isEmpty()) { + final int port = getPortNumberFromConfigKeys(conf, + ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY) + .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT); + for (String scmAddress : scmAddressList) { + LOG.info("SCM Address for proxy is {}", scmAddress); + + Optional hostname = getHostName(scmAddress); + if (hostname.isPresent()) { + resultList.add(NetUtils.createSocketAddr( + hostname.get() + ":" + port)); + } + } + } + if (resultList.isEmpty()) { + // fall back + resultList.add(getScmAddressForBlockClients(conf)); + } + return resultList; + } + + private void loadConfigs() { + Collection scmAddressList = getSCMAddressList(); + int scmNodeIndex = 1; + for (InetSocketAddress scmAddress : scmAddressList) { + String nodeId = SCM_DUMMY_NODEID_PREFIX + scmNodeIndex; + if (scmAddress == null) { + LOG.error("Failed to create SCM proxy for {}.", nodeId); + continue; + } + scmNodeIndex++; + SCMProxyInfo scmProxyInfo = new SCMProxyInfo( + scmServiceId, nodeId, scmAddress); + ProxyInfo proxy = new ProxyInfo<>( + null, scmProxyInfo.toString()); + scmProxies.put(nodeId, proxy); + scmProxyInfoMap.put(nodeId, scmProxyInfo); + scmNodeIDList.add(nodeId); + } + + if (scmProxies.isEmpty()) { + throw new IllegalArgumentException("Could not find any configured " + + "addresses for SCM. Please configure the system with " + + OZONE_SCM_NAMES); + } + } + + @VisibleForTesting + public synchronized String getCurrentProxyOMNodeId() { + return currentProxySCMNodeId; + } + + @Override + public synchronized ProxyInfo getProxy() { + ProxyInfo currentProxyInfo = scmProxies.get(currentProxySCMNodeId); + createSCMProxyIfNeeded(currentProxyInfo, currentProxySCMNodeId); + return currentProxyInfo; + } + + @Override + public void performFailover(ScmBlockLocationProtocolPB newLeader) { + // Should do nothing here. + LOG.debug("Failing over to next proxy. {}", getCurrentProxyOMNodeId()); + } + + public void performFailoverToAssignedLeader(String newLeader) { + if (newLeader == null) { + // If newLeader is not assigned, it will fail over to next proxy. + nextProxyIndex(); + } else { + if (!assignLeaderToNode(newLeader)) { + LOG.debug("Failing over OM proxy to nodeId: {}", newLeader); + nextProxyIndex(); + } + } + } + + @Override + public Class getInterface() { + return ScmBlockLocationProtocolPB.class; + } + + @Override + public synchronized void close() throws IOException { + for (ProxyInfo proxy : scmProxies.values()) { + ScmBlockLocationProtocolPB scmProxy = proxy.proxy; + if (scmProxy != null) { + RPC.stopProxy(scmProxy); + } + } + } + + public RetryAction getRetryAction(int failovers) { + if (failovers < maxRetryCount) { + return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY, + getRetryInterval()); + } else { + return RetryAction.FAIL; + } + } + + private synchronized long getRetryInterval() { + // TODO add exponential backup + return retryInterval; + } + + private synchronized int nextProxyIndex() { + lastAttemptedLeader = currentProxySCMNodeId; + + // round robin the next proxy + currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size(); + currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex); + return currentProxyIndex; + } + + private synchronized boolean assignLeaderToNode(String newLeaderNodeId) { + if (!currentProxySCMNodeId.equals(newLeaderNodeId)) { + if (scmProxies.containsKey(newLeaderNodeId)) { + lastAttemptedLeader = currentProxySCMNodeId; + currentProxySCMNodeId = newLeaderNodeId; + currentProxyIndex = scmNodeIDList.indexOf(currentProxySCMNodeId); + return true; + } + } else { + lastAttemptedLeader = currentProxySCMNodeId; + } + return false; + } + + /** + * Creates proxy object if it does not already exist. + */ + private void createSCMProxyIfNeeded(ProxyInfo proxyInfo, + String nodeId) { + if (proxyInfo.proxy == null) { + InetSocketAddress address = scmProxyInfoMap.get(nodeId).getAddress(); + try { + ScmBlockLocationProtocolPB proxy = createSCMProxy(address); + try { + proxyInfo.proxy = proxy; + } catch (IllegalAccessError iae) { + scmProxies.put(nodeId, + new ProxyInfo<>(proxy, proxyInfo.proxyInfo)); + } + } catch (IOException ioe) { + LOG.error("{} Failed to create RPC proxy to SCM at {}", + this.getClass().getSimpleName(), address, ioe); + throw new RuntimeException(ioe); + } + } + } + + private ScmBlockLocationProtocolPB createSCMProxy( + InetSocketAddress scmAddress) throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocol.class, + ProtobufRpcEngine.class); + return RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion, + scmAddress, UserGroupInformation.getCurrentUser(), hadoopConf, + NetUtils.getDefaultSocketFactory(hadoopConf), + (int)conf.getObject(SCMClientConfig.class).getRpcTimeOut()); + } + + public RetryPolicy getSCMBlockLocationRetryPolicy(String newLeader) { + RetryPolicy retryPolicy = new RetryPolicy() { + @Override + public RetryAction shouldRetry(Exception e, int retry, + int failover, boolean b) { + performFailoverToAssignedLeader(newLeader); + return getRetryAction(failover); + } + }; + return retryPolicy; + } +} + diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java new file mode 100644 index 000000000000..99dc4461f00b --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.hdds.scm.proxy; + +import org.apache.hadoop.hdds.conf.Config; +import org.apache.hadoop.hdds.conf.ConfigGroup; +import org.apache.hadoop.hdds.conf.ConfigType; + +import java.util.concurrent.TimeUnit; + +import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT; +import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE; +import static org.apache.hadoop.hdds.conf.ConfigTag.SCM; + +/** + * Config for SCM Block Client. + */ +@ConfigGroup(prefix = "hdds.scmclient") +public class SCMClientConfig { + public static final String SCM_CLIENT_RPC_TIME_OUT = "rpc.timeout"; + public static final String SCM_CLIENT_FAILOVER_MAX_RETRY = + "failover.max.retry"; + public static final String SCM_CLIENT_RETRY_INTERVAL = + "failover.retry.interval"; + + @Config(key = SCM_CLIENT_RPC_TIME_OUT, + defaultValue = "15m", + type = ConfigType.TIME, + tags = {OZONE, SCM, CLIENT}, + timeUnit = TimeUnit.MILLISECONDS, + description = "RpcClient timeout on waiting for the response from " + + "SCM. The default value is set to 15 minutes. " + + "If ipc.client.ping is set to true and this rpc-timeout " + + "is greater than the value of ipc.ping.interval, the effective " + + "value of the rpc-timeout is rounded up to multiple of " + + "ipc.ping.interval." + ) + private long rpcTimeOut = 15 * 60 * 1000; + + @Config(key = SCM_CLIENT_FAILOVER_MAX_RETRY, + defaultValue = "15", + type = ConfigType.INT, + tags = {OZONE, SCM, CLIENT}, + description = "Max retry count for SCM Client when failover happens." + ) + private int retryCount = 15; + + @Config(key = SCM_CLIENT_RETRY_INTERVAL, + defaultValue = "2s", + type = ConfigType.TIME, + tags = {OZONE, SCM, CLIENT}, + timeUnit = TimeUnit.MILLISECONDS, + description = "SCM Client timeout on waiting for the next connection " + + "retry to other SCM IP. The default value is set to 2 minutes. " + ) + private long retryInterval = 2 * 1000; + + public long getRpcTimeOut() { + return rpcTimeOut; + } + + public void setRpcTimeOut(long timeOut) { + // As at the end this value should not exceed MAX_VALUE, as underlying + // Rpc layer SocketTimeout parameter is int. + if (rpcTimeOut > Integer.MAX_VALUE) { + this.rpcTimeOut = Integer.MAX_VALUE; + } + this.rpcTimeOut = timeOut; + } + + public int getRetryCount() { + return retryCount; + } + + public void setRetryCount(int retryCount) { + this.retryCount = retryCount; + } + + public long getRetryInterval() { + return retryInterval; + } + + public void setRetryInterval(long retryInterval) { + this.retryInterval = retryInterval; + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java new file mode 100644 index 000000000000..ec2a5b01ce34 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.proxy; + +import com.google.common.base.Preconditions; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.net.InetSocketAddress; + +/** + * Class to store SCM proxy info. + */ +public class SCMProxyInfo { + private String serviceId; + private String nodeId; + private String rpcAddrStr; + private InetSocketAddress rpcAddr; + + private static final Logger LOG = + LoggerFactory.getLogger(SCMProxyInfo.class); + + public SCMProxyInfo(String serviceID, String nodeID, + InetSocketAddress rpcAddress) { + Preconditions.checkNotNull(rpcAddress); + this.serviceId = serviceID; + this.nodeId = nodeID; + this.rpcAddrStr = rpcAddress.toString(); + this.rpcAddr = rpcAddress; + if (rpcAddr.isUnresolved()) { + LOG.warn("SCM address {} for serviceID {} remains unresolved " + + "for node ID {} Check your ozone-site.xml file to ensure scm " + + "addresses are configured properly.", + rpcAddress, serviceId, nodeId); + } + } + + public String toString() { + return new StringBuilder() + .append("nodeId=") + .append(nodeId) + .append(",nodeAddress=") + .append(rpcAddrStr).toString(); + } + + public InetSocketAddress getAddress() { + return rpcAddr; + } + + public String getServiceId() { + return serviceId; + } + + public String getNodeId() { + return nodeId; + } +} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java new file mode 100644 index 000000000000..e3bb05895e99 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.proxy; + +/** + * This package contains classes related to scm proxy. + */ diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto index 7d59bd72ef4c..bc5193f9a4b8 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto @@ -70,6 +70,8 @@ message SCMBlockLocationResponse { optional string leaderOMNodeId = 6; + optional string leaderSCMNodeId = 7; + optional AllocateScmBlockResponseProto allocateScmBlockResponse = 11; optional DeleteScmKeyBlocksResponseProto deleteScmKeyBlocksResponse = 12; optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13; @@ -116,6 +118,7 @@ enum Status { INTERNAL_ERROR = 29; FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY = 30; FAILED_TO_INIT_LEADER_CHOOSE_POLICY = 31; + SCM_NOT_LEADER = 32; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index a04e168c9980..cbb64c195a22 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.server.SCMBlockProtocolServer; import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; @@ -94,9 +95,26 @@ private SCMBlockLocationResponse.Builder createSCMBlockResponse( .setTraceID(traceID); } + private boolean isLeader() throws ServiceException { + if (!(impl instanceof SCMBlockProtocolServer)) { + throw new ServiceException("Should be SCMBlockProtocolServer"); + } else { + return ((SCMBlockProtocolServer) impl).getScm().checkLeader(); + } + } + @Override public SCMBlockLocationResponse send(RpcController controller, SCMBlockLocationRequest request) throws ServiceException { + if (!isLeader()) { + SCMBlockLocationResponse.Builder response = createSCMBlockResponse( + request.getCmdType(), + request.getTraceID()); + response.setSuccess(false); + response.setStatus(Status.SCM_NOT_LEADER); + response.setLeaderSCMNodeId(null); + return response.build(); + } return dispatcher.processRequest( request, this::processMessage, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index a5d341919675..35af3cf8d698 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -300,6 +300,10 @@ public ScmInfo getScmInfo() throws IOException { } } + public StorageContainerManager getScm() { + return scm; + } + @Override public List sortDatanodes(List nodes, String clientMachine) throws IOException { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 3d1ad7286379..04b8bb88f686 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1031,6 +1031,25 @@ public ReplicationManager getReplicationManager() { return replicationManager; } + /** + * Check if the current scm is the leader. + * @return - if the current scm is the leader. + */ + public boolean checkLeader() { + return scmHAManager.isLeader(); + } + + /** + * Get suggested leader from Raft. + * @return - suggested leader address. + */ + public String getSuggestedLeader() { + if (scmHAManager.getSuggestedLeader() == null) { + return null; + } + return scmHAManager.getSuggestedLeader().getAddress(); + } + public void checkAdminAccess(String remoteUser) throws IOException { if (remoteUser != null && !scmAdminUsernames.contains(remoteUser)) { throw new IOException( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index e58af8b49abb..898cd7c1ab8f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; @@ -187,7 +188,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; @@ -824,16 +824,9 @@ private static ScmBlockLocationProtocol getScmBlockClient( OzoneConfiguration conf) throws IOException { RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class, ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); - InetSocketAddress scmBlockAddress = - getScmAddressForBlockClients(conf); ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = new ScmBlockLocationProtocolClientSideTranslatorPB( - RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion, - scmBlockAddress, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); + new SCMBlockLocationFailoverProxyProvider(conf)); return TracingUtil .createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class, conf); From 51111265a5fc737bbf9f01adc56df86e557513f6 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Sat, 24 Oct 2020 21:30:59 +0530 Subject: [PATCH 23/51] HDDS-4192. enable SCM Raft Group based on config ozone.scm.names. --- .../hdds/scm/ha/SCMRatisServerImpl.java | 116 ++++++++++++++++-- 1 file changed, 106 insertions(+), 10 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index 33ae109ef825..8611b1fb591e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -18,17 +18,22 @@ package org.apache.hadoop.hdds.scm.ha; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Collectors; +import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.RaftClientReply; @@ -38,11 +43,15 @@ import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.server.RaftServer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * TODO. */ public class SCMRatisServerImpl implements SCMRatisServer { + private static final Logger LOG = + LoggerFactory.getLogger(SCMRatisServerImpl.class); private final InetSocketAddress address; private final RaftServer server; @@ -53,24 +62,20 @@ public class SCMRatisServerImpl implements SCMRatisServer { private final ClientId clientId = ClientId.randomId(); private final AtomicLong callId = new AtomicLong(); - // TODO: Refactor and remove ConfigurationSource and use only // SCMHAConfiguration. SCMRatisServerImpl(final SCMHAConfiguration haConf, final ConfigurationSource conf) throws IOException { - final String scmServiceId = "SCM-HA-Service"; - final String scmNodeId = "localhost"; - this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId); this.address = haConf.getRatisBindAddress(); - final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address); - final List raftPeers = new ArrayList<>(); - raftPeers.add(localRaftPeer); + + SCMHAGroupBuilder scmHAGroupBuilder = new SCMHAGroupBuilder(haConf, conf); + this.raftPeerId = scmHAGroupBuilder.getPeerId(); + this.raftGroupId = scmHAGroupBuilder.getRaftGroupId(); + this.raftGroup = scmHAGroupBuilder.getRaftGroup(); + final RaftProperties serverProperties = RatisUtil .newRaftProperties(haConf, conf); - this.raftGroupId = RaftGroupId.valueOf( - UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8))); - this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); this.scmStateMachine = new SCMStateMachine(); this.server = RaftServer.newBuilder() .setServerId(raftPeerId) @@ -125,4 +130,95 @@ public RaftGroupId getRaftGroupId() { public List getRaftPeers() { return Collections.singletonList(new RaftPeer(raftPeerId)); } + + + /** + * If the SCM group starts from {@link ScmConfigKeys#OZONE_SCM_NAMES}, + * its raft peers should locate on different nodes, and use the same port + * to communicate with each other. + * + * Each of the raft peer figures out its {@link RaftPeerId} by computing + * its position in {@link ScmConfigKeys#OZONE_SCM_NAMES}. + * + * Assume {@link ScmConfigKeys#OZONE_SCM_NAMES} is "ip0,ip1,ip2", + * scm with ip0 identifies its {@link RaftPeerId} as scm0, + * scm with ip1 identifies its {@link RaftPeerId} as scm1, + * scm with ip2 identifies its {@link RaftPeerId} as scm2. + * + * After startup, they will form a {@link RaftGroup} with groupID + * "SCM-HA-Service", and communicate with each other via + * ozone.scm.ha.ratis.bind.port. + */ + private static class SCMHAGroupBuilder { + private final static String SCM_SERVICE_ID = "SCM-HA-Service"; + + private final RaftGroupId raftGroupId; + private final RaftGroup raftGroup; + private RaftPeerId selfPeerId; + + /** + * @return raft group + */ + public RaftGroup getRaftGroup() { + return raftGroup; + } + + /** + * @return raft group id + */ + public RaftGroupId getRaftGroupId() { + return raftGroupId; + } + + /** + * @return raft peer id + */ + public RaftPeerId getPeerId() { + return selfPeerId; + } + + SCMHAGroupBuilder(final SCMHAConfiguration haConf, + final ConfigurationSource conf) throws IOException { + // fetch port + int port = haConf.getRatisBindAddress().getPort(); + + // fetch localhost + InetAddress localHost = InetAddress.getLocalHost(); + + // fetch hosts from ozone.scm.names + List hosts = + Arrays.stream(conf.getTrimmedStrings(ScmConfigKeys.OZONE_SCM_NAMES)) + .map(scmName -> HddsUtils.getHostName(scmName).get()) + .collect(Collectors.toList()); + + final List raftPeers = new ArrayList<>(); + for (int i = 0; i < hosts.size(); ++i) { + String nodeId = "scm" + i; + RaftPeerId peerId = RaftPeerId.getRaftPeerId(nodeId); + + String host = hosts.get(i); + if (InetAddress.getByName(host).equals(localHost)) { + selfPeerId = peerId; + } + + raftPeers.add(new RaftPeer(peerId, host + ":" + port)); + } + + if (selfPeerId == null) { + String errorMessage = "localhost " + localHost + + " does not exist in ozone.scm.names " + + conf.get(ScmConfigKeys.OZONE_SCM_NAMES); + throw new IOException(errorMessage); + } + + LOG.info("Build a RaftGroup for SCMHA, " + + "localHost: {}, OZONE_SCM_NAMES: {}, selfPeerId: {}", + localHost, conf.get(ScmConfigKeys.OZONE_SCM_NAMES), selfPeerId); + + raftGroupId = RaftGroupId.valueOf(UUID.nameUUIDFromBytes( + SCM_SERVICE_ID.getBytes(StandardCharsets.UTF_8))); + + raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers); + } + } } From 43b87fe01625d56343b86e9baa1cc20f3ab73236 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Sat, 24 Oct 2020 21:33:16 +0530 Subject: [PATCH 24/51] HDDS-4365. SCMBlockLocationFailoverProxyProvider should use ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine. --- .../scm/proxy/SCMBlockLocationFailoverProxyProvider.java | 5 ++--- .../main/java/org/apache/hadoop/ozone/om/OzoneManager.java | 3 --- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java index 1beb69ea28c7..a9ff4c1ea775 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java @@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; import org.apache.hadoop.io.retry.FailoverProxyProvider; @@ -80,7 +79,7 @@ public class SCMBlockLocationFailoverProxyProvider implements public SCMBlockLocationFailoverProxyProvider(ConfigurationSource conf) { this.conf = conf; - this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocol.class); + this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class); this.scmServiceId = conf.getTrimmed(OZONE_SCM_SERVICE_IDS_KEY); this.scmProxies = new HashMap<>(); this.scmProxyInfoMap = new HashMap<>(); @@ -257,7 +256,7 @@ private ScmBlockLocationProtocolPB createSCMProxy( InetSocketAddress scmAddress) throws IOException { Configuration hadoopConf = LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); - RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocol.class, + RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocolPB.class, ProtobufRpcEngine.class); return RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion, scmAddress, UserGroupInformation.getCurrentUser(), hadoopConf, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 898cd7c1ab8f..307ec301457c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -66,7 +66,6 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; @@ -822,8 +821,6 @@ private static void loginOMUser(OzoneConfiguration conf) */ private static ScmBlockLocationProtocol getScmBlockClient( OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class, - ProtobufRpcEngine.class); ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient = new ScmBlockLocationProtocolClientSideTranslatorPB( new SCMBlockLocationFailoverProxyProvider(conf)); From 782057a4a15a4d562054c65d1e863e3a8bd8d368 Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Sun, 25 Oct 2020 19:50:47 +0530 Subject: [PATCH 25/51] Resolving master merge conflict. --- .../hdds/scm/block/BlockManagerImpl.java | 2 +- .../container/CloseContainerEventHandler.java | 2 +- .../hadoop/hdds/scm/ha/SCMHAManager.java | 2 +- .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 2 +- .../hadoop/hdds/scm/node/NewNodeHandler.java | 2 +- .../node/NonHealthyToHealthyNodeHandler.java | 2 +- .../hdds/scm/pipeline/PipelineFactory.java | 3 +- .../hdds/scm/pipeline/PipelineManager.java | 2 +- .../scm/pipeline/PipelineManagerMXBean.java | 2 +- .../scm/pipeline/PipelineManagerV2Impl.java | 14 +++- .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 4 +- .../hdds/scm/ha/TestSCMRatisResponse.java | 4 +- .../scm/pipeline/MockPipelineManager.java | 10 +++ .../pipeline/MockRatisPipelineProvider.java | 9 ++- .../pipeline/TestPipelineActionHandler.java | 2 +- .../scm/pipeline/TestPipelineManagerImpl.java | 2 +- .../scm/pipeline/TestLeaderChoosePolicy.java | 2 +- .../ozone/TestStorageContainerManager.java | 4 -- .../rpc/TestDiscardPreallocatedBlocks.java | 2 +- .../hadoop/ozone/scm/TestCloseContainer.java | 2 +- .../hadoop/ozone/shell/TestScmAdminHA.java | 3 +- .../apache/hadoop/ozone/om/OzoneManager.java | 1 + .../recon/ozone-recon-web/pnpm-lock.yaml | 66 +++++++++---------- .../admin/scm/GetScmRatisRolesSubcommand.java | 9 ++- .../hadoop/ozone/admin/scm/ScmAdmin.java | 6 +- 25 files changed, 86 insertions(+), 73 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index ec0094b024e0..06d34ed0f71b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -58,7 +58,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java index a2b79fb027c0..da221934bff0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java @@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index dc68b413536a..8ee26a25df03 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdds.scm.ha; import java.util.List; -import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import java.io.IOException; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index e2aa04f2f4e8..c86c7d9feb6c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -21,10 +21,10 @@ import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.impl.RaftServerImpl; import org.apache.ratis.server.impl.RaftServerProxy; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java index 42cada998262..08b51525e1ca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java index e73231be628c..1cb6501e9cf2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventHandler; import org.apache.hadoop.hdds.server.events.EventPublisher; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index bdd5053a6b07..68401d256cdc 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -45,7 +45,8 @@ public class PipelineFactory { providers.put(ReplicationType.STAND_ALONE, new SimplePipelineProvider(nodeManager, stateManager)); providers.put(ReplicationType.RATIS, - new RatisPipelineProvider(nodeManager, stateManager, conf, + new RatisPipelineProvider(nodeManager, + (PipelineStateManager) stateManager, conf, eventPublisher)); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java index ddd461b6d83e..9f714da0a4d3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus; import org.apache.hadoop.hdds.server.events.EventHandler; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; /** * Interface which exposes the api for pipeline management. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java index 55e096b2a57e..57eab610f4c1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.pipeline; import org.apache.hadoop.hdds.annotation.InterfaceAudience; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import java.util.Map; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 4690f29e14cb..8c4d7b74e5b4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -36,7 +36,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.Time; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -460,6 +460,18 @@ public void incNumBlocksAllocatedMetric(PipelineID id) { metrics.incNumBlocksAllocated(id); } + @Override + public int minHealthyVolumeNum(Pipeline pipeline) { + // TODO: + throw new UnsupportedOperationException(); + } + + @Override + public int minPipelineLimit(Pipeline pipeline) { + // TODO: + throw new UnsupportedOperationException(); + } + /** * Activates a dormant pipeline. * diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index e31e7e16bc5e..886eaeef1524 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -29,14 +29,14 @@ import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.Message; -import org.apache.ratis.protocol.NotLeaderException; import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.StateMachineException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; +import org.apache.ratis.protocol.exceptions.StateMachineException; /** * Mock SCMHAManager implementation for testing. diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java index daf08565888c..05e2970199cb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java @@ -20,13 +20,13 @@ import com.google.protobuf.InvalidProtocolBufferException; import org.apache.ratis.protocol.ClientId; -import org.apache.ratis.protocol.LeaderNotReadyException; import org.apache.ratis.protocol.Message; import org.apache.ratis.protocol.RaftClientReply; -import org.apache.ratis.protocol.RaftException; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.exceptions.LeaderNotReadyException; +import org.apache.ratis.protocol.exceptions.RaftException; import org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java index 6292ad44376a..947cd378b93f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java @@ -192,6 +192,16 @@ public void incNumBlocksAllocatedMetric(final PipelineID id) { } + @Override + public int minHealthyVolumeNum(Pipeline pipeline) { + return 0; + } + + @Override + public int minPipelineLimit(Pipeline pipeline) { + return 0; + } + @Override public void activatePipeline(final PipelineID pipelineID) throws IOException { diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index 49cac8b9f034..cd0c47522b71 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -39,20 +39,23 @@ public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher, boolean autoOpen) { - super(nodeManager, stateManager, conf, eventPublisher); + super(nodeManager, (PipelineStateManager) stateManager, + conf, eventPublisher); autoOpenPipeline = autoOpen; } public MockRatisPipelineProvider(NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf) { - super(nodeManager, stateManager, conf, new EventQueue()); + super(nodeManager, (PipelineStateManager) stateManager, + conf, new EventQueue()); } public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { - super(nodeManager, stateManager, conf, eventPublisher); + super(nodeManager, (PipelineStateManager) stateManager, + conf, eventPublisher); autoOpenPipeline = true; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java index e40c8bace5d9..4517b896d416 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode; import org.apache.hadoop.hdds.server.events.EventQueue; import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.Test; import org.mockito.Mockito; diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 642378f6786b..51fff062af9b 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -38,7 +38,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.hadoop.test.GenericTestUtils; -import org.apache.ratis.protocol.NotLeaderException; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.junit.After; import org.junit.Assert; import org.junit.Before; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java index ecf1c2f05ac7..c043c562819a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java @@ -182,7 +182,7 @@ public void testMinLeaderCountChoosePolicy() throws Exception { int destroyNum = r.nextInt(pipelines.size()); for (int k = 0; k <= destroyNum; k++) { - pipelineManager.finalizeAndDestroyPipeline(pipelines.get(k), false); + pipelineManager.closePipeline(pipelines.get(k), false); } waitForPipelines(pipelineNum); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java index 90f4996b1004..ce696c56ef7c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java @@ -26,12 +26,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys .HDDS_SCM_SAFEMODE_PIPELINE_CREATION; import static org.junit.Assert.fail; -<<<<<<< HEAD import org.apache.hadoop.hdds.scm.TestUtils; -import org.junit.Ignore; -======= ->>>>>>> master import static org.mockito.Matchers.argThat; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java index 37e13b6f952a..fd523336fbbe 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java @@ -146,7 +146,7 @@ public void testDiscardPreallocatedBlocks() throws Exception { long containerID = locationInfos.get(0).getContainerID(); ContainerInfo container = cluster.getStorageContainerManager().getContainerManager() - .getContainer(ContainerID.valueof(containerID)); + .getContainer(ContainerID.valueOf(containerID)); Pipeline pipeline = cluster.getStorageContainerManager().getPipelineManager() .getPipeline(container.getPipelineID()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java index fe058592ba49..888422aff38f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java @@ -105,7 +105,7 @@ public void testReplicasAreReportedForClosedContainerAfterRestart() ContainerInfo container = scm.getContainerManager().getContainers().get(0); Pipeline pipeline = scm.getPipelineManager() .getPipeline(container.getPipelineID()); - scm.getPipelineManager().finalizeAndDestroyPipeline(pipeline, false); + scm.getPipelineManager().closePipeline(pipeline, false); GenericTestUtils.waitFor(() -> container.getState() == HddsProtos.LifeCycleState.CLOSED, 200, 30000); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 63a8e7186c79..02f88156465f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -19,10 +19,11 @@ import java.net.InetSocketAddress; import java.util.UUID; + +import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.admin.OzoneAdmin; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 307ec301457c..b012a24e07cc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -187,6 +187,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients; import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString; import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index 0385fd03f5da..516ac881e8e5 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -42,7 +42,7 @@ devDependencies: json-server: 0.15.1 npm-run-all: 4.1.5 xo: 0.30.0 -lockfileVersion: 5.1 +lockfileVersion: 5.2 packages: /3d-view/2.0.0: dependencies: @@ -2033,7 +2033,7 @@ packages: jest-haste-map: 24.9.0 jest-message-util: 24.9.0 jest-regex-util: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-resolve-dependencies: 24.9.0 jest-runner: 24.9.0 jest-runtime: 24.9.0 @@ -2088,7 +2088,7 @@ packages: istanbul-lib-source-maps: 3.0.6 istanbul-reports: 2.2.7 jest-haste-map: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-runtime: 24.9.0 jest-util: 24.9.0 jest-worker: 24.9.0 @@ -2196,7 +2196,7 @@ packages: integrity: sha1-zlblOfg1UrWNENZy6k1vya3HsjQ= /@mapbox/mapbox-gl-supported/1.5.0_mapbox-gl@1.10.1: dependencies: - mapbox-gl: 1.10.1_mapbox-gl@1.10.1 + mapbox-gl: 1.10.1 dev: false peerDependencies: mapbox-gl: '>=0.32.1 <2.0.0' @@ -3470,7 +3470,7 @@ packages: mkdirp: 0.5.5 pify: 4.0.1 schema-utils: 2.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 6.9' @@ -5016,7 +5016,7 @@ packages: postcss-modules-values: 3.0.0 postcss-value-parser: 4.1.0 schema-utils: 2.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 8.9.0' @@ -6176,7 +6176,7 @@ packages: loader-utils: 1.4.0 object-hash: 2.0.3 schema-utils: 2.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 8.9.0' @@ -6912,7 +6912,7 @@ packages: dependencies: loader-utils: 1.4.0 schema-utils: 2.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 8.9.0' @@ -8216,7 +8216,7 @@ packages: pretty-error: 2.1.1 tapable: 1.1.3 util.promisify: 1.0.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>=6.9' @@ -9214,7 +9214,7 @@ packages: jest-get-type: 24.9.0 jest-jasmine2: 24.9.0 jest-regex-util: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-util: 24.9.0 jest-validate: 24.9.0 micromatch: 3.1.10 @@ -9403,7 +9403,7 @@ packages: integrity: sha512-3BEYN5WbSq9wd+SyLDES7AHnjH9A/ROBwmz7l2y+ol+NtSFO8DYiEBzoO1CeFc9a8DYy10EO4dDFVv/wN3zl1w== /jest-pnp-resolver/1.2.1_jest-resolve@24.9.0: dependencies: - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 dev: false engines: node: '>=6' @@ -9430,7 +9430,7 @@ packages: node: '>= 6' resolution: integrity: sha512-Fm7b6AlWnYhT0BXy4hXpactHIqER7erNgIsIozDXWl5dVm+k8XdGVe1oTg1JyaFnOxarMEbax3wyRJqGP2Pq+g== - /jest-resolve/24.9.0_jest-resolve@24.9.0: + /jest-resolve/24.9.0: dependencies: '@jest/types': 24.9.0 browser-resolve: 1.11.3 @@ -9440,8 +9440,6 @@ packages: dev: false engines: node: '>= 6' - peerDependencies: - jest-resolve: '*' resolution: integrity: sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ== /jest-runner/24.9.0: @@ -9459,7 +9457,7 @@ packages: jest-jasmine2: 24.9.0 jest-leak-detector: 24.9.0 jest-message-util: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-runtime: 24.9.0 jest-util: 24.9.0 jest-worker: 24.9.0 @@ -9487,7 +9485,7 @@ packages: jest-message-util: 24.9.0 jest-mock: 24.9.0 jest-regex-util: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-snapshot: 24.9.0 jest-util: 24.9.0 jest-validate: 24.9.0 @@ -9517,7 +9515,7 @@ packages: jest-get-type: 24.9.0 jest-matcher-utils: 24.9.0 jest-message-util: 24.9.0 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 mkdirp: 0.5.5 natural-compare: 1.4.0 pretty-format: 24.9.0 @@ -10289,7 +10287,7 @@ packages: node: '>=0.10.0' resolution: integrity: sha1-7Nyo8TFE5mDxtb1B8S80edmN+48= - /mapbox-gl/1.10.1_mapbox-gl@1.10.1: + /mapbox-gl/1.10.1: dependencies: '@mapbox/geojson-rewind': 0.5.0 '@mapbox/geojson-types': 1.0.2 @@ -10317,8 +10315,6 @@ packages: dev: false engines: node: '>=6.4.0' - peerDependencies: - mapbox-gl: '*' resolution: integrity: sha512-0aHt+lFUpYfvh0kMIqXqNXqoYMuhuAsMlw87TbhWrw78Tx2zfuPI0Lx31/YPUgJ+Ire0tzQ4JnuBL7acDNXmMg== /marching-simplex-table/1.0.0: @@ -10575,7 +10571,7 @@ packages: loader-utils: 1.4.0 normalize-url: 1.9.1 schema-utils: 1.0.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-sources: 1.4.3 dev: false engines: @@ -11304,7 +11300,7 @@ packages: dependencies: cssnano: 4.1.10 last-call-webpack-plugin: 3.0.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false peerDependencies: webpack: ^4.0.0 @@ -11870,7 +11866,7 @@ packages: has-hover: 1.0.1 has-passive-events: 1.0.0 is-mobile: 2.2.1 - mapbox-gl: 1.10.1_mapbox-gl@1.10.1 + mapbox-gl: 1.10.1 matrix-camera-controller: 2.1.3 mouse-change: 1.4.0 mouse-event-offset: 3.0.2 @@ -13658,7 +13654,7 @@ packages: identity-obj-proxy: 3.0.0 jest: 24.9.0 jest-environment-jsdom-fourteen: 1.0.1 - jest-resolve: 24.9.0_jest-resolve@24.9.0 + jest-resolve: 24.9.0 jest-watch-typeahead: 0.4.2 mini-css-extract-plugin: 0.9.0_webpack@4.42.0 optimize-css-assets-webpack-plugin: 5.0.3_webpack@4.42.0 @@ -13679,7 +13675,7 @@ packages: ts-pnp: 1.1.6_typescript@3.4.5 typescript: 3.4.5 url-loader: 2.3.0_file-loader@4.3.0+webpack@4.42.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-dev-server: 3.10.3_webpack@4.42.0 webpack-manifest-plugin: 2.2.0_webpack@4.42.0 workbox-webpack-plugin: 4.3.1_webpack@4.42.0 @@ -14512,7 +14508,7 @@ packages: neo-async: 2.6.1 schema-utils: 2.7.0 semver: 6.3.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 8.9.0' @@ -15583,7 +15579,7 @@ packages: serialize-javascript: 3.1.0 source-map: 0.6.1 terser: 4.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-sources: 1.4.3 worker-farm: 1.7.0 dev: false @@ -15603,7 +15599,7 @@ packages: serialize-javascript: 2.1.2 source-map: 0.6.1 terser: 4.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-sources: 1.4.3 dev: false engines: @@ -16188,7 +16184,7 @@ packages: loader-utils: 1.4.0 mime: 2.4.6 schema-utils: 2.7.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>= 8.9.0' @@ -16491,7 +16487,7 @@ packages: mime: 2.4.6 mkdirp: 0.5.5 range-parser: 1.2.1 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-log: 2.0.0 dev: false engines: @@ -16531,7 +16527,7 @@ packages: strip-ansi: 3.0.1 supports-color: 6.1.0 url: 0.11.0 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 webpack-dev-middleware: 3.7.2_webpack@4.42.0 webpack-log: 2.0.0 ws: 6.2.1 @@ -16563,7 +16559,7 @@ packages: lodash: 4.17.15 object.entries: 1.1.2 tapable: 1.1.3 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 dev: false engines: node: '>=6.11.5' @@ -16578,7 +16574,7 @@ packages: dev: false resolution: integrity: sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ== - /webpack/4.42.0_webpack@4.42.0: + /webpack/4.42.0: dependencies: '@webassemblyjs/ast': 1.8.5 '@webassemblyjs/helper-module-context': 1.8.5 @@ -16607,8 +16603,6 @@ packages: engines: node: '>=6.11.5' hasBin: true - peerDependencies: - webpack: '*' resolution: integrity: sha512-EzJRHvwQyBiYrYqhyjW9AqM90dE4+s1/XtCfn7uWg6cS72zH+2VPFAlsnW0+W0cDi0XRjNKUMoJtpSi50+Ph6w== /websocket-driver/0.7.4: @@ -16819,7 +16813,7 @@ packages: dependencies: '@babel/runtime': 7.10.2 json-stable-stringify: 1.0.1 - webpack: 4.42.0_webpack@4.42.0 + webpack: 4.42.0 workbox-build: 4.3.1 dev: false engines: diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java index cf2310c3cd5c..c784c44754b2 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java @@ -17,9 +17,10 @@ */ package org.apache.hadoop.ozone.admin.scm; +import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; @@ -31,16 +32,14 @@ description = "List all SCMs and their respective Ratis server roles", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class GetScmRatisRolesSubcommand implements Callable { +public class GetScmRatisRolesSubcommand extends ScmSubcommand { @CommandLine.ParentCommand private ScmAdmin parent; @Override - public Void call() throws Exception { - ScmClient scmClient = parent.createScmClient(); + protected void execute(ScmClient scmClient) throws IOException { List roles = scmClient.getScmRatisRoles(); System.out.println(roles); - return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java index 2605a6d3b950..b05e65a6e180 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java @@ -19,8 +19,7 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.scm.client.ScmClient; -import org.apache.hadoop.ozone.admin.OzoneAdmin; +import org.apache.hadoop.hdds.cli.OzoneAdmin; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -54,7 +53,4 @@ public Void call() throws Exception { return null; } - public ScmClient createScmClient() { - return parent.createScmClient(); - } } From 44a6503ce6f2a65f7ffdf70fa9f82e51b72873f2 Mon Sep 17 00:00:00 2001 From: Nandakumar Date: Tue, 17 Nov 2020 01:06:39 +0530 Subject: [PATCH 26/51] HDDS-4393. Addressing test failures after master merge. (#1587) --- .../hdds/conf/ConfigurationReflectionUtil.java | 1 + .../hadoop/hdds/scm/pipeline/PipelineFactory.java | 2 +- .../hdds/scm/pipeline/PipelineManagerV2Impl.java | 12 +++++++----- .../hdds/scm/pipeline/RatisPipelineProvider.java | 2 +- .../choose/algorithms/DefaultLeaderChoosePolicy.java | 4 ++-- .../leader/choose/algorithms/LeaderChoosePolicy.java | 8 ++++---- .../choose/algorithms/LeaderChoosePolicyFactory.java | 6 +++--- .../algorithms/MinLeaderCountChoosePolicy.java | 6 +++--- .../hdds/scm/pipeline/MockRatisPipelineProvider.java | 6 +++--- .../hdds/scm/safemode/TestSCMSafeModeManager.java | 2 ++ .../org/apache/hadoop/ozone/admin/scm/ScmAdmin.java | 10 +++++++++- 11 files changed, 36 insertions(+), 23 deletions(-) diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java index 816e1b9d7ce7..2cea51832324 100644 --- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java +++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java @@ -97,6 +97,7 @@ public static void injectConfigurationToObject(ConfigurationSource from, case SIZE: forcedFieldSet(field, configuration, from.getStorageSize(key, "0B", configAnnotation.sizeUnit())); + break; case CLASS: forcedFieldSet(field, configuration, from.getClass(key, Object.class)); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java index 68401d256cdc..6bf1d4e9bcd4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java @@ -46,7 +46,7 @@ public class PipelineFactory { new SimplePipelineProvider(nodeManager, stateManager)); providers.put(ReplicationType.RATIS, new RatisPipelineProvider(nodeManager, - (PipelineStateManager) stateManager, conf, + stateManager, conf, eventPublisher)); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 8c4d7b74e5b4..041c94179112 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -77,12 +77,14 @@ public final class PipelineManagerV2Impl implements PipelineManager { private long pipelineWaitDefaultTimeout; private final AtomicBoolean isInSafeMode; private SCMHAManager scmhaManager; + private NodeManager nodeManager; // Used to track if the safemode pre-checks have completed. This is designed // to prevent pipelines being created until sufficient nodes have registered. private final AtomicBoolean pipelineCreationAllowed; private PipelineManagerV2Impl(ConfigurationSource conf, SCMHAManager scmhaManager, + NodeManager nodeManager, StateManager pipelineStateManager, PipelineFactory pipelineFactory, EventPublisher eventPublisher) { @@ -91,6 +93,7 @@ private PipelineManagerV2Impl(ConfigurationSource conf, this.stateManager = pipelineStateManager; this.conf = conf; this.scmhaManager = scmhaManager; + this.nodeManager = nodeManager; this.eventPublisher = eventPublisher; this.pmInfoBean = MBeans.register("SCMPipelineManager", "SCMPipelineManagerInfo", this); @@ -123,7 +126,8 @@ public static PipelineManagerV2Impl newPipelineManager( nodeManager, stateManager, conf, eventPublisher); // Create PipelineManager PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf, - scmhaManager, stateManager, pipelineFactory, eventPublisher); + scmhaManager, nodeManager, stateManager, pipelineFactory, + eventPublisher); // Create background thread. Scheduler scheduler = new Scheduler( @@ -462,14 +466,12 @@ public void incNumBlocksAllocatedMetric(PipelineID id) { @Override public int minHealthyVolumeNum(Pipeline pipeline) { - // TODO: - throw new UnsupportedOperationException(); + return nodeManager.minHealthyVolumeNum(pipeline.getNodes()); } @Override public int minPipelineLimit(Pipeline pipeline) { - // TODO: - throw new UnsupportedOperationException(); + return nodeManager.minPipelineLimit(pipeline.getNodes()); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java index 830db18d72e2..75f5278691a0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java @@ -59,7 +59,7 @@ public class RatisPipelineProvider extends PipelineProvider { @VisibleForTesting public RatisPipelineProvider(NodeManager nodeManager, - PipelineStateManager stateManager, ConfigurationSource conf, + StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { super(nodeManager, stateManager); this.conf = conf; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java index 415cf10a2908..0b49ed8603b7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; +import org.apache.hadoop.hdds.scm.pipeline.StateManager; import java.util.List; @@ -31,7 +31,7 @@ public class DefaultLeaderChoosePolicy extends LeaderChoosePolicy { public DefaultLeaderChoosePolicy( - NodeManager nodeManager, PipelineStateManager pipelineStateManager) { + NodeManager nodeManager, StateManager pipelineStateManager) { super(nodeManager, pipelineStateManager); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java index 04c155b356ce..ada770259087 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java @@ -19,7 +19,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; +import org.apache.hadoop.hdds.scm.pipeline.StateManager; import java.util.List; @@ -29,10 +29,10 @@ public abstract class LeaderChoosePolicy { private final NodeManager nodeManager; - private final PipelineStateManager pipelineStateManager; + private final StateManager pipelineStateManager; public LeaderChoosePolicy( - NodeManager nodeManager, PipelineStateManager pipelineStateManager) { + NodeManager nodeManager, StateManager pipelineStateManager) { this.nodeManager = nodeManager; this.pipelineStateManager = pipelineStateManager; } @@ -49,7 +49,7 @@ protected NodeManager getNodeManager() { return nodeManager; } - protected PipelineStateManager getPipelineStateManager() { + protected StateManager getPipelineStateManager() { return pipelineStateManager; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java index 8e1a0ff49784..03d676e5bc58 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java @@ -21,7 +21,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; -import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; +import org.apache.hadoop.hdds.scm.pipeline.StateManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -45,7 +45,7 @@ private LeaderChoosePolicyFactory() { public static LeaderChoosePolicy getPolicy( ConfigurationSource conf, final NodeManager nodeManager, - final PipelineStateManager pipelineStateManager) throws SCMException { + final StateManager pipelineStateManager) throws SCMException { final Class policyClass = conf .getClass(ScmConfigKeys.OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY, OZONE_SCM_PIPELINE_LEADER_CHOOSING_POLICY_DEFAULT, @@ -53,7 +53,7 @@ public static LeaderChoosePolicy getPolicy( Constructor constructor; try { constructor = policyClass.getDeclaredConstructor(NodeManager.class, - PipelineStateManager.class); + StateManager.class); LOG.info("Create leader choose policy of type {}", policyClass.getCanonicalName()); } catch (NoSuchMethodException e) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java index d4068b9e130d..8cb1df1b0b57 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException; -import org.apache.hadoop.hdds.scm.pipeline.PipelineStateManager; +import org.apache.hadoop.hdds.scm.pipeline.StateManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,7 +41,7 @@ public class MinLeaderCountChoosePolicy extends LeaderChoosePolicy { LoggerFactory.getLogger(MinLeaderCountChoosePolicy.class); public MinLeaderCountChoosePolicy( - NodeManager nodeManager, PipelineStateManager pipelineStateManager) { + NodeManager nodeManager, StateManager pipelineStateManager) { super(nodeManager, pipelineStateManager); } @@ -66,7 +66,7 @@ public DatanodeDetails chooseLeader(List dns) { private Map getSuggestedLeaderCount( List dns, NodeManager nodeManager, - PipelineStateManager pipelineStateManager) { + StateManager pipelineStateManager) { Map suggestedLeaderCount = new HashMap<>(); for (DatanodeDetails dn : dns) { suggestedLeaderCount.put(dn, 0); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java index cd0c47522b71..04d140367077 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java @@ -39,7 +39,7 @@ public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher, boolean autoOpen) { - super(nodeManager, (PipelineStateManager) stateManager, + super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = autoOpen; } @@ -47,14 +47,14 @@ public MockRatisPipelineProvider( public MockRatisPipelineProvider(NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf) { - super(nodeManager, (PipelineStateManager) stateManager, + super(nodeManager, stateManager, conf, new EventQueue()); } public MockRatisPipelineProvider( NodeManager nodeManager, StateManager stateManager, ConfigurationSource conf, EventPublisher eventPublisher) { - super(nodeManager, (PipelineStateManager) stateManager, + super(nodeManager, stateManager, conf, eventPublisher); autoOpenPipeline = true; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index 0febf0630bb9..790188311345 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -55,6 +55,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.junit.Before; +import org.junit.Ignore; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TemporaryFolder; @@ -642,6 +643,7 @@ public void testSafeModePipelineExitRule() throws Exception { } @Test + @Ignore("The test is failing, enable after fixing it") public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { int numOfDns = 5; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java index b05e65a6e180..d745a6a702fe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java @@ -20,6 +20,8 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.OzoneAdmin; +import org.apache.hadoop.hdds.cli.SubcommandWithParent; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -35,7 +37,8 @@ subcommands = { GetScmRatisRolesSubcommand.class }) -public class ScmAdmin extends GenericCli { +@MetaInfServices(SubcommandWithParent.class) +public class ScmAdmin extends GenericCli implements SubcommandWithParent { @CommandLine.ParentCommand private OzoneAdmin parent; @@ -53,4 +56,9 @@ public Void call() throws Exception { return null; } + @Override + public Class getParentType() { + return OzoneAdmin.class; + } + } From 517358bf54da8baf26c1d25d5ae8b3d5b7438543 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Fri, 20 Nov 2020 09:52:01 +0000 Subject: [PATCH 27/51] HDDS-4488. Open RocksDB read only when loading containers at Datanode startup (#1605) * Add readonly flag to RDBStore * Fix style issue Co-authored-by: S O'Donnell --- .../container/common/utils/ContainerCache.java | 2 +- .../container/keyvalue/helpers/BlockUtils.java | 12 ++++++------ .../keyvalue/helpers/KeyValueContainerUtil.java | 7 ++++--- .../container/metadata/AbstractDatanodeStore.java | 6 +++++- .../metadata/DatanodeStoreSchemaOneImpl.java | 7 ++++--- .../metadata/DatanodeStoreSchemaTwoImpl.java | 7 ++++--- .../ozone/container/common/TestContainerCache.java | 2 +- .../hadoop/hdds/utils/db/DBStoreBuilder.java | 9 ++++++++- .../org/apache/hadoop/hdds/utils/db/RDBStore.java | 14 ++++++++++---- 9 files changed, 43 insertions(+), 23 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index a7fa54a1797f..c56c7432adcb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -157,7 +157,7 @@ public ReferenceCountedDB getDB(long containerID, String containerDBType, try { long start = Time.monotonicNow(); DatanodeStore store = BlockUtils.getUncachedDatanodeStore(containerID, - containerDBPath, schemaVersion, conf); + containerDBPath, schemaVersion, conf, false); db = new ReferenceCountedDB(store, containerDBPath); metrics.incDbOpenLatency(Time.monotonicNow() - start); } catch (Exception e) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java index 0a8d692afd95..e842d17f2ace 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java @@ -61,15 +61,15 @@ private BlockUtils() { */ public static DatanodeStore getUncachedDatanodeStore(long containerID, String containerDBPath, String schemaVersion, - ConfigurationSource conf) throws IOException { + ConfigurationSource conf, boolean readOnly) throws IOException { DatanodeStore store; if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) { store = new DatanodeStoreSchemaOneImpl(conf, - containerID, containerDBPath); + containerID, containerDBPath, readOnly); } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, - containerID, containerDBPath); + containerID, containerDBPath, readOnly); } else { throw new IllegalArgumentException( "Unrecognized database schema version: " + schemaVersion); @@ -88,11 +88,11 @@ public static DatanodeStore getUncachedDatanodeStore(long containerID, * @throws IOException */ public static DatanodeStore getUncachedDatanodeStore( - KeyValueContainerData containerData, ConfigurationSource conf) - throws IOException { + KeyValueContainerData containerData, ConfigurationSource conf, + boolean readOnly) throws IOException { return getUncachedDatanodeStore(containerData.getContainerID(), containerData.getDbFile().getAbsolutePath(), - containerData.getSchemaVersion(), conf); + containerData.getSchemaVersion(), conf, readOnly); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java index 1780b1ebf0e3..7c75108d7d83 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java @@ -106,10 +106,10 @@ public static void createContainerMetaData(long containerID, DatanodeStore store; if (schemaVersion.equals(OzoneConsts.SCHEMA_V1)) { store = new DatanodeStoreSchemaOneImpl(conf, - containerID, dbFile.getAbsolutePath()); + containerID, dbFile.getAbsolutePath(), false); } else if (schemaVersion.equals(OzoneConsts.SCHEMA_V2)) { store = new DatanodeStoreSchemaTwoImpl(conf, - containerID, dbFile.getAbsolutePath()); + containerID, dbFile.getAbsolutePath(), false); } else { throw new IllegalArgumentException( "Unrecognized schema version for container: " + schemaVersion); @@ -192,7 +192,8 @@ public static void parseKVContainerData(KeyValueContainerData kvContainerData, DatanodeStore store = null; try { try { - store = BlockUtils.getUncachedDatanodeStore(kvContainerData, config); + store = BlockUtils.getUncachedDatanodeStore( + kvContainerData, config, true); } catch (IOException e) { // If an exception is thrown, then it may indicate the RocksDB is // already open in the container cache. As this code is only executed at diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java index efbc24730af7..12921af1ead3 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java @@ -77,6 +77,7 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { private static final DBProfile DEFAULT_PROFILE = DBProfile.DISK; private static final Map OPTIONS_CACHE = new ConcurrentHashMap<>(); + private final boolean openReadOnly; /** * Constructs the metadata store and starts the DB services. @@ -85,7 +86,8 @@ public abstract class AbstractDatanodeStore implements DatanodeStore { * @throws IOException - on Failure. */ protected AbstractDatanodeStore(ConfigurationSource config, long containerID, - AbstractDatanodeDBDefinition dbDef) throws IOException { + AbstractDatanodeDBDefinition dbDef, boolean openReadOnly) + throws IOException { // The same config instance is used on each datanode, so we can share the // corresponding column family options, providing a single shared cache @@ -97,6 +99,7 @@ protected AbstractDatanodeStore(ConfigurationSource config, long containerID, this.dbDef = dbDef; this.containerID = containerID; + this.openReadOnly = openReadOnly; start(config); } @@ -121,6 +124,7 @@ public void start(ConfigurationSource config) this.store = DBStoreBuilder.newBuilder(config, dbDef) .setDBOptions(options) .setDefaultCFOptions(cfOptions) + .setOpenReadOnly(openReadOnly) .build(); // Use the DatanodeTable wrapper to disable the table iterator on diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java index 97b9b25e275d..b72f19eeeb51 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java @@ -35,9 +35,10 @@ public class DatanodeStoreSchemaOneImpl extends AbstractDatanodeStore { * @throws IOException - on Failure. */ public DatanodeStoreSchemaOneImpl(ConfigurationSource config, - long containerID, String dbPath) - throws IOException { - super(config, containerID, new DatanodeSchemaOneDBDefinition(dbPath)); + long containerID, String dbPath, boolean openReadOnly) + throws IOException { + super(config, containerID, new DatanodeSchemaOneDBDefinition(dbPath), + openReadOnly); } @Override diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java index fd8e4fa9d087..df9b8c06712d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java @@ -37,8 +37,9 @@ public class DatanodeStoreSchemaTwoImpl extends AbstractDatanodeStore { * @throws IOException - on Failure. */ public DatanodeStoreSchemaTwoImpl(ConfigurationSource config, - long containerID, String dbPath) - throws IOException { - super(config, containerID, new DatanodeSchemaTwoDBDefinition(dbPath)); + long containerID, String dbPath, boolean openReadOnly) + throws IOException { + super(config, containerID, new DatanodeSchemaTwoDBDefinition(dbPath), + openReadOnly); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java index 3a47120181ff..e7f6388cee02 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestContainerCache.java @@ -54,7 +54,7 @@ public class TestContainerCache { private void createContainerDB(OzoneConfiguration conf, File dbFile) throws Exception { DatanodeStore store = new DatanodeStoreSchemaTwoImpl( - conf, 1, dbFile.getAbsolutePath()); + conf, 1, dbFile.getAbsolutePath(), false); // we close since the SCM pre-creates containers. // we will open and put Db handle into a cache when keys are being created diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java index 5b907afd9f82..ad48a19927a7 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java @@ -87,6 +87,8 @@ public final class DBStoreBuilder { private CodecRegistry registry; private String rocksDbStat; private RocksDBConfiguration rocksDBConfiguration; + // Flag to indicate if the RocksDB should be opened readonly. + private boolean openReadOnly = false; /** * Create DBStoreBuilder from a generic DBDefinition. @@ -187,7 +189,7 @@ public DBStore build() throws IOException { } return new RDBStore(dbFile, rocksDBOption, writeOptions, tableConfigs, - registry); + registry, openReadOnly); } public DBStoreBuilder setName(String name) { @@ -227,6 +229,11 @@ public DBStoreBuilder setPath(Path path) { return this; } + public DBStoreBuilder setOpenReadOnly(boolean readOnly) { + this.openReadOnly = readOnly; + return this; + } + /** * Set the {@link DBOptions} and default {@link ColumnFamilyOptions} based * on {@code prof}. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java index 0890a81d8fb8..adbd2eb39ead 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java @@ -71,12 +71,13 @@ public class RDBStore implements DBStore { @VisibleForTesting public RDBStore(File dbFile, DBOptions options, Set families) throws IOException { - this(dbFile, options, new WriteOptions(), families, new CodecRegistry()); + this(dbFile, options, new WriteOptions(), families, new CodecRegistry(), + false); } public RDBStore(File dbFile, DBOptions options, WriteOptions writeOptions, Set families, - CodecRegistry registry) + CodecRegistry registry, boolean readOnly) throws IOException { Preconditions.checkNotNull(dbFile, "DB file location cannot be null"); Preconditions.checkNotNull(families); @@ -108,8 +109,13 @@ public RDBStore(File dbFile, DBOptions options, extraCf.forEach(cf -> columnFamilyDescriptors.add(cf.getDescriptor())); } - db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), - columnFamilyDescriptors, columnFamilyHandles); + if (readOnly) { + db = RocksDB.openReadOnly(dbOptions, dbLocation.getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles); + } else { + db = RocksDB.open(dbOptions, dbLocation.getAbsolutePath(), + columnFamilyDescriptors, columnFamilyHandles); + } for (int x = 0; x < columnFamilyHandles.size(); x++) { handleTable.put( From 49cd3ec26e362146a4bfbb662a8461925f9b6339 Mon Sep 17 00:00:00 2001 From: Hanisha Koneru Date: Fri, 20 Nov 2020 13:29:18 -0800 Subject: [PATCH 28/51] HDDS-4432. Update Ratis version to latest snapshot. (#1586) Co-authored-by: Shashikant Banerjee --- .../hadoop/hdds/scm/XceiverClientRatis.java | 12 +++++------ .../apache/hadoop/hdds/ratis/RatisHelper.java | 12 ++++++++--- .../CreatePipelineCommandHandler.java | 2 +- .../server/ratis/ContainerStateMachine.java | 4 ++-- .../TestCreatePipelineCommandHandler.java | 13 ++++++++---- .../hdds/conf/DatanodeRatisServerConfig.java | 2 +- .../hdds/scm/pipeline/RatisPipelineUtils.java | 4 ++-- .../apache/hadoop/ozone/RatisTestHelper.java | 3 ++- .../om/ratis/OzoneManagerRatisServer.java | 15 +++++++++++--- .../om/ratis/OzoneManagerStateMachine.java | 2 +- .../ratis/TestOzoneManagerStateMachine.java | 10 +++++----- .../FollowerAppendLogEntryGenerator.java | 11 ++++++---- .../freon/LeaderAppendLogEntryGenerator.java | 20 +++++++++++++------ pom.xml | 2 +- 14 files changed, 72 insertions(+), 40 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java index ced9df7fb664..6e99bf3553d4 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java @@ -217,12 +217,12 @@ private CompletableFuture sendRequestAsync( if (LOG.isDebugEnabled()) { LOG.debug("sendCommandAsync ReadOnly {}", message); } - return getClient().sendReadOnlyAsync(message); + return getClient().async().sendReadOnly(message); } else { if (LOG.isDebugEnabled()) { LOG.debug("sendCommandAsync {}", message); } - return getClient().sendAsync(message); + return getClient().async().send(message); } } @@ -258,8 +258,8 @@ public XceiverClientReply watchForCommit(long index) } RaftClientReply reply; try { - CompletableFuture replyFuture = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED); + CompletableFuture replyFuture = getClient().async() + .watch(index, RaftProtos.ReplicationLevel.ALL_COMMITTED); replyFuture.get(); } catch (Exception e) { Throwable t = HddsClientUtils.checkForException(e); @@ -267,8 +267,8 @@ public XceiverClientReply watchForCommit(long index) if (t instanceof GroupMismatchException) { throw e; } - reply = getClient() - .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED) + reply = getClient().async() + .watch(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED) .get(); List commitInfoProtoList = reply.getCommitInfos().stream() diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java index 324774d7d77f..c910dd5acea8 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java @@ -104,12 +104,18 @@ public static RaftPeerId toRaftPeerId(DatanodeDetails id) { } public static RaftPeer toRaftPeer(DatanodeDetails id) { - return new RaftPeer(toRaftPeerId(id), toRaftPeerAddressString(id)); + return RaftPeer.newBuilder() + .setId(toRaftPeerId(id)) + .setAddress(toRaftPeerAddressString(id)) + .build(); } public static RaftPeer toRaftPeer(DatanodeDetails id, int priority) { - return new RaftPeer( - toRaftPeerId(id), toRaftPeerAddressString(id), priority); + return RaftPeer.newBuilder() + .setId(toRaftPeerId(id)) + .setAddress(toRaftPeerAddressString(id)) + .setPriority(priority) + .build(); } private static List toRaftPeers(Pipeline pipeline) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java index 4ad05de2cd48..db4bd76cc25f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java @@ -96,7 +96,7 @@ public void handle(SCMCommand command, OzoneContainer ozoneContainer, final RaftPeer peer = RatisHelper.toRaftPeer(d); try (RaftClient client = RatisHelper.newRaftClient(peer, conf, ozoneContainer.getTlsClientConfig())) { - client.groupAdd(group, peer.getId()); + client.getGroupManagementApi(peer.getId()).add(group); } catch (AlreadyExistsException ae) { // do not log } catch (IOException ioe) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 89ab976bc88e..1a87ce55e26d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -700,7 +700,7 @@ private synchronized void updateLastApplied() { * @param index index of the log entry */ @Override - public void notifyIndexUpdate(long term, long index) { + public void notifyTermIndexUpdated(long term, long index) { applyTransactionCompletionMap.put(index, term); // We need to call updateLastApplied here because now in ratis when a // node becomes leader, it is checking stateMachineIndex >= @@ -844,7 +844,7 @@ public void evictStateMachineCache() { } @Override - public void notifySlowness(RoleInfoProto roleInfoProto) { + public void notifyFollowerSlowness(RoleInfoProto roleInfoProto) { ratisServer.handleNodeSlowness(gid, roleInfoProto); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java index ede0b94de476..febd1c3bd0df 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCreatePipelineCommandHandler.java @@ -34,6 +34,7 @@ import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.ratis.client.RaftClient; +import org.apache.ratis.client.api.GroupManagementApi; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.ClientId; import org.apache.ratis.protocol.RaftGroup; @@ -64,6 +65,7 @@ public class TestCreatePipelineCommandHandler { private StateContext stateContext; private SCMConnectionManager connectionManager; private RaftClient raftClient; + private GroupManagementApi raftClientGroupManager; @Before public void setup() throws Exception { @@ -71,8 +73,11 @@ public void setup() throws Exception { stateContext = Mockito.mock(StateContext.class); connectionManager = Mockito.mock(SCMConnectionManager.class); raftClient = Mockito.mock(RaftClient.class); + raftClientGroupManager = Mockito.mock(GroupManagementApi.class); final RaftClient.Builder builder = mockRaftClientBuilder(); Mockito.when(builder.build()).thenReturn(raftClient); + Mockito.when(raftClient.getGroupManagementApi( + Mockito.any(RaftPeerId.class))).thenReturn(raftClientGroupManager); PowerMockito.mockStatic(RaftClient.class); PowerMockito.when(RaftClient.newBuilder()).thenReturn(builder); } @@ -121,8 +126,8 @@ public void testPipelineCreation() throws IOException { Mockito.verify(writeChanel, Mockito.times(1)) .addGroup(pipelineID.getProtobuf(), datanodes, priorityList); - Mockito.verify(raftClient, Mockito.times(2)) - .groupAdd(Mockito.any(RaftGroup.class), Mockito.any(RaftPeerId.class)); + Mockito.verify(raftClientGroupManager, Mockito.times(2)) + .add(Mockito.any(RaftGroup.class)); } @Test @@ -150,8 +155,8 @@ public void testCommandIdempotency() throws IOException { Mockito.verify(writeChanel, Mockito.times(0)) .addGroup(pipelineID.getProtobuf(), datanodes); - Mockito.verify(raftClient, Mockito.times(0)) - .groupAdd(Mockito.any(RaftGroup.class), Mockito.any(RaftPeerId.class)); + Mockito.verify(raftClientGroupManager, Mockito.times(0)) + .add(Mockito.any(RaftGroup.class)); } private List getDatanodes() { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java index 19084f179cb8..8392789735f1 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java @@ -88,7 +88,7 @@ public void setNoLeaderTimeout(Duration duration) { this.noLeaderTimeout = duration.toMillis(); } - @Config(key = "rpcslowness.timeout", + @Config(key = "rpc.slowness.timeout", defaultValue = "300s", type = ConfigType.TIME, tags = {OZONE, DATANODE, RATIS}, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java index d174a89b6fe6..97bff9a6e504 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java @@ -94,8 +94,8 @@ static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID, try(RaftClient client = RatisHelper .newRaftClient(SupportedRpcType.valueOfIgnoreCase(rpcType), p, retryPolicy, grpcTlsConfig, ozoneConf)) { - client.groupRemove(RaftGroupId.valueOf(pipelineID.getId()), - true, false, p.getId()); + client.getGroupManagementApi(p.getId()) + .remove(RaftGroupId.valueOf(pipelineID.getId()), true, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java index 535ca91b4903..668d694ea863 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java @@ -122,6 +122,7 @@ static void initXceiverServerRatis( final OzoneConfiguration conf = new OzoneConfiguration(); final RaftClient client = newRaftClient(rpc, p, RatisHelper.createRetryPolicy(conf), conf); - client.groupAdd(RatisHelper.newRaftGroup(pipeline), p.getId()); + client.getGroupManagementApi(p.getId()) + .add(RatisHelper.newRaftGroup(pipeline)); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 0b5a2b124342..0bf58ba4d1f3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -311,7 +311,10 @@ public static OzoneManagerRatisServer newOMRatisServer( InetSocketAddress ratisAddr = new InetSocketAddress( omNodeDetails.getInetAddress(), omNodeDetails.getRatisPort()); - RaftPeer localRaftPeer = new RaftPeer(localRaftPeerId, ratisAddr); + RaftPeer localRaftPeer = RaftPeer.newBuilder() + .setId(localRaftPeerId) + .setAddress(ratisAddr) + .build(); List raftPeers = new ArrayList<>(); // Add this Ratis server to the Ratis ring @@ -322,11 +325,17 @@ public static OzoneManagerRatisServer newOMRatisServer( RaftPeerId raftPeerId = RaftPeerId.valueOf(peerNodeId); RaftPeer raftPeer; if (peerInfo.isHostUnresolved()) { - raftPeer = new RaftPeer(raftPeerId, peerInfo.getRatisHostPortStr()); + raftPeer = RaftPeer.newBuilder() + .setId(raftPeerId) + .setAddress(peerInfo.getRatisHostPortStr()) + .build(); } else { InetSocketAddress peerRatisAddr = new InetSocketAddress( peerInfo.getInetAddress(), peerInfo.getRatisPort()); - raftPeer = new RaftPeer(raftPeerId, peerRatisAddr); + raftPeer = RaftPeer.newBuilder() + .setId(raftPeerId) + .setAddress(peerRatisAddr) + .build(); } // Add other OM nodes belonging to the same OM service to the Ratis ring diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index acd637536917..aaf94e9b8c56 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -149,7 +149,7 @@ public SnapshotInfo getLatestSnapshot() { * @param index index which is being updated */ @Override - public void notifyIndexUpdate(long currentTerm, long index) { + public void notifyTermIndexUpdated(long currentTerm, long index) { // SnapshotInfo should be updated when the term changes. // The index here refers to the log entry index and the index in // SnapshotInfo represents the snapshotIndex i.e. the index of the last diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java index 5a60f7cb6a4b..285c992ee5c4 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java @@ -61,7 +61,7 @@ public void setup() throws Exception { Mockito.mock(OMRatisSnapshotInfo.class)); ozoneManagerStateMachine = new OzoneManagerStateMachine(ozoneManagerRatisServer, false); - ozoneManagerStateMachine.notifyIndexUpdate(0, 0); + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 0); } @Test @@ -70,7 +70,7 @@ public void testLastAppliedIndex() { // Happy scenario. // Conf/metadata transaction. - ozoneManagerStateMachine.notifyIndexUpdate(0, 1); + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 1); Assert.assertEquals(0, ozoneManagerStateMachine.getLastAppliedTermIndex().getTerm()); Assert.assertEquals(1, @@ -94,7 +94,7 @@ public void testLastAppliedIndex() { ozoneManagerStateMachine.getLastAppliedTermIndex().getIndex()); // Conf/metadata transaction. - ozoneManagerStateMachine.notifyIndexUpdate(0L, 4L); + ozoneManagerStateMachine.notifyTermIndexUpdated(0L, 4L); Assert.assertEquals(0L, ozoneManagerStateMachine.getLastAppliedTermIndex().getTerm()); @@ -128,7 +128,7 @@ public void testApplyTransactionsUpdateLastAppliedIndexCalledLate() { // lastAppliedIndex as 4 or not. // Conf/metadata transaction. - ozoneManagerStateMachine.notifyIndexUpdate(0, 1); + ozoneManagerStateMachine.notifyTermIndexUpdated(0, 1); Assert.assertEquals(0, ozoneManagerStateMachine.getLastAppliedTermIndex().getTerm()); Assert.assertEquals(1, @@ -143,7 +143,7 @@ public void testApplyTransactionsUpdateLastAppliedIndexCalledLate() { // Conf/metadata transaction. - ozoneManagerStateMachine.notifyIndexUpdate(0L, 5L); + ozoneManagerStateMachine.notifyTermIndexUpdated(0L, 5L); // Still it should be zero, as for 2,3,4 updateLastAppliedIndex is not yet // called so the lastAppliedIndex will be at older value. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java index c96c8a3da0f3..b6de21811185 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java @@ -327,16 +327,19 @@ private void configureGroup() throws IOException { RaftPeerId.getRaftPeerId(serverId); RaftGroup group = RaftGroup.valueOf(groupId, - new RaftPeer(RaftPeerId.valueOf(serverId), serverAddress), - new RaftPeer(RaftPeerId.valueOf(FAKE_LEADER_ID), - FAKE_LEADER_ADDDRESS)); + RaftPeer.newBuilder().setId(serverId).setAddress(serverAddress).build(), + RaftPeer.newBuilder() + .setId(RaftPeerId.valueOf(FAKE_LEADER_ID)) + .setAddress(FAKE_LEADER_ADDDRESS) + .build()); RaftClient client = RaftClient.newBuilder() .setClientId(clientId) .setProperties(new RaftProperties(true)) .setRaftGroup(group) .build(); - RaftClientReply raftClientReply = client.groupAdd(group, peerId); + RaftClientReply raftClientReply = client.getGroupManagementApi(peerId) + .add(group); LOG.info( "Group is configured in the RAFT server (one follower, one fake " diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java index 8f6575526c64..bf2cc044d99d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java @@ -251,18 +251,26 @@ private void configureGroup() throws IOException { RaftPeerId.getRaftPeerId(serverId); RaftGroup group = RaftGroup.valueOf(groupId, - new RaftPeer(RaftPeerId.valueOf(serverId), serverAddress), - new RaftPeer(RaftPeerId.valueOf(FAKE_FOLLOWER_ID1), - FAKE_LEADER_ADDDRESS1), - new RaftPeer(RaftPeerId.valueOf(FAKE_FOLLOWER_ID1), - FAKE_LEADER_ADDDRESS2)); + RaftPeer.newBuilder() + .setId(serverId) + .setAddress(serverAddress) + .build(), + RaftPeer.newBuilder() + .setId(RaftPeerId.valueOf(FAKE_FOLLOWER_ID1)) + .setAddress(FAKE_LEADER_ADDDRESS1) + .build(), + RaftPeer.newBuilder() + .setId(RaftPeerId.valueOf(FAKE_FOLLOWER_ID1)) + .setAddress(FAKE_LEADER_ADDDRESS2) + .build()); RaftClient client = RaftClient.newBuilder() .setClientId(clientId) .setProperties(new RaftProperties(true)) .setRaftGroup(group) .build(); - RaftClientReply raftClientReply = client.groupAdd(group, peerId); + RaftClientReply raftClientReply = client.getGroupManagementApi(peerId) + .add(group); LOG.info( "Group is configured in the RAFT server (with two fake leader leader)" diff --git a/pom.xml b/pom.xml index 38bbb1af630f..07ec99d586ca 100644 --- a/pom.xml +++ b/pom.xml @@ -79,7 +79,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ozone.version} - 1.1.0-11689cd-SNAPSHOT + 1.1.0-913f5a4-SNAPSHOT 0.6.0-SNAPSHOT From fd879be45ecf566ed1fb467048c24871a6589d81 Mon Sep 17 00:00:00 2001 From: Yang Yu Date: Sun, 22 Nov 2020 12:47:01 +0800 Subject: [PATCH 29/51] HDDS-4476. Improve the ZH translation of the HA.md in doc. (#1597) --- hadoop-hdds/docs/content/feature/HA.zh.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/docs/content/feature/HA.zh.md b/hadoop-hdds/docs/content/feature/HA.zh.md index cb89530ff560..b1975712b28d 100644 --- a/hadoop-hdds/docs/content/feature/HA.zh.md +++ b/hadoop-hdds/docs/content/feature/HA.zh.md @@ -3,7 +3,7 @@ title: "高可用" weight: 1 menu: main: - parent: 特性 + parent: 特点 summary: Ozone 用于避免单点故障的高可用设置 --- @@ -87,7 +87,7 @@ Ozone 有两个leader节点(用于键管理的 *Ozone Manager* 和用于块空 ``` -基于 [客户端接口]({{< ref path="interface/_index.md" lang="en">}}) ,定义好的 `serviceId` 就可用于替代单个 OM 主机。 +基于 [客户端接口]({{< ref path="interface/_index.zh.md" lang="zh">}}) ,定义好的 `serviceId` 就可用于替代单个 OM 主机。 例如,使用 `o3fs://` @@ -114,4 +114,4 @@ RocksDB 由后台的批处理事务线程负责更新(这也就是所谓的" ## 参考文档 * 查看 [该页面]({{< ref path="design/omha.md" lang="en">}}) 以获取详细设计文档; -* Ozone 的分发包中的 compose/ozone-om-ha 目录下提供了一个配置 OM 高可用的示例,可以借助 [docker-compose]({{< ref path="start/RunningViaDocker.md" lang="en">}}) 进行测试。 +* Ozone 的分发包中的 `compose/ozone-om-ha` 目录下提供了一个配置 OM 高可用的示例,可以借助 [docker-compose]({{< ref path="start/RunningViaDocker.md" lang="en">}}) 进行测试。 From f71fc122b945e194d32c74858e6eeaa1e4c1686e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 23 Nov 2020 13:37:49 +0100 Subject: [PATCH 30/51] HDDS-4417. Simplify Ozone client code with configuration object -- addendum (#1581) --- .../apache/hadoop/hdds/scm/OzoneClientConfig.java | 5 ++--- .../ozone/client/io/BlockOutputStreamEntry.java | 4 ---- .../apache/hadoop/ozone/client/rpc/RpcClient.java | 2 +- .../ozone/client/rpc/TestBlockOutputStream.java | 12 +++++------- .../rpc/TestBlockOutputStreamWithFailures.java | 11 ++++------- .../rpc/TestOzoneClientRetriesOnException.java | 13 +++++-------- .../hadoop/ozone/client/rpc/TestOzoneRpcClient.java | 2 -- .../client/rpc/TestOzoneRpcClientWithRatis.java | 2 -- 8 files changed, 17 insertions(+), 34 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index 2a79edbe31eb..b3c774a2c22f 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.conf.ConfigGroup; import org.apache.hadoop.hdds.conf.ConfigTag; import org.apache.hadoop.hdds.conf.ConfigType; +import org.apache.hadoop.hdds.conf.PostConstruct; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -111,9 +112,7 @@ public class OzoneClientConfig { tags = ConfigTag.CLIENT) private boolean checksumVerify = true; - public OzoneClientConfig() { - } - + @PostConstruct private void validate() { Preconditions.checkState(streamBufferSize > 0); Preconditions.checkState(streamBufferFlushSize > 0); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java index 8e90c54ee920..594bbf0bd752 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockOutputStreamEntry.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdds.scm.storage.BlockOutputStream; import org.apache.hadoop.hdds.scm.storage.BufferPool; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import com.google.common.annotations.VisibleForTesting; @@ -96,9 +95,6 @@ long getRemaining() { */ private void checkStream() throws IOException { if (this.outputStream == null) { - if (getToken() != null) { - UserGroupInformation.getCurrentUser().addToken(getToken()); - } this.outputStream = new BlockOutputStream(blockID, xceiverClientManager, pipeline, bufferPool, config, token); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 8c0ed41c78a4..c96c3efbbafb 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -666,7 +666,7 @@ public OzoneOutputStream createKey( throws IOException { verifyVolumeName(volumeName); verifyBucketName(bucketName); - if (clientConfig.isStreamBufferFlushDelay()) { + if (checkKeyNameEnabled) { HddsClientUtils.verifyKeyName(keyName); } HddsClientUtils.checkNotNull(keyName, type, factor); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 639a64db626f..f918a8b27293 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -86,9 +86,11 @@ public static void init() throws Exception { flushSize = 2 * chunkSize; maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; - OzoneClientConfig config = new OzoneClientConfig(); - config.setChecksumType(ChecksumType.NONE); - conf.setFromObject(config); + + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + conf.setFromObject(clientConfig); conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); @@ -96,10 +98,6 @@ public static void init() throws Exception { conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setStreamBufferFlushDelay(false); - conf.setFromObject(clientConfig); - cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(7) .setTotalPipelineNumLimit(10) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 8463c1d6a8e1..b0404050b008 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -97,9 +97,10 @@ public void init() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; - OzoneClientConfig config = new OzoneClientConfig(); - config.setChecksumType(ChecksumType.NONE); - conf.setFromObject(config); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + conf.setFromObject(clientConfig); conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 10, TimeUnit.SECONDS); @@ -120,10 +121,6 @@ public void init() throws Exception { raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3)); conf.setFromObject(raftClientConfig); - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setStreamBufferFlushDelay(false); - conf.setFromObject(clientConfig); - RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class); ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java index d885d38da748..6b1a80a7df68 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java @@ -96,19 +96,16 @@ public void init() throws Exception { maxFlushSize = 2 * flushSize; blockSize = 2 * maxFlushSize; - OzoneClientConfig config = new OzoneClientConfig(); - config.setMaxRetryCount(3); - config.setChecksumType(ChecksumType.NONE); - conf.setFromObject(config); + OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setMaxRetryCount(3); + clientConfig.setChecksumType(ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + conf.setFromObject(clientConfig); conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 3); conf.setQuietMode(false); - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - clientConfig.setStreamBufferFlushDelay(false); - conf.setFromObject(clientConfig); - cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(7) .setTotalPipelineNumLimit(10) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index 17cc0ce99424..db4af1798507 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -21,7 +21,6 @@ import java.io.IOException; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.junit.AfterClass; @@ -51,7 +50,6 @@ public class TestOzoneRpcClient extends TestOzoneRpcClientAbstract { @BeforeClass public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setFromObject(new OzoneClientConfig()); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); startCluster(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java index ac84f172aedf..10400b3ef988 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; @@ -63,7 +62,6 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract { @BeforeClass public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.setFromObject(new OzoneClientConfig()); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false); From 6cc4a438a4ba2dcc312e732f244b20847b294f19 Mon Sep 17 00:00:00 2001 From: maobaolong Date: Tue, 24 Nov 2020 00:33:37 +0800 Subject: [PATCH 31/51] HDDS-4468. Fix Goofys listBucket large than 1000 objects will stuck forever (#1595) --- .../hadoop/ozone/s3/endpoint/BucketEndpoint.java | 8 +++++++- .../hadoop/ozone/s3/endpoint/ListObjectResponse.java | 11 +++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java index 067d6a447c04..789bb4511027 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java @@ -112,6 +112,10 @@ public Response list( ContinueToken decodedToken = ContinueToken.decodeFromString(continueToken); + // Assign marker to startAfter. for the compatibility of aws api v1 + if (startAfter == null && marker != null) { + startAfter = marker; + } if (startAfter != null && continueToken != null) { // If continuation token and start after both are provided, then we // ignore start After @@ -129,7 +133,7 @@ public Response list( response.setDelimiter(delimiter); response.setName(bucketName); response.setPrefix(prefix); - response.setMarker(""); + response.setMarker(marker == null ? "" : marker); response.setMaxKeys(maxKeys); response.setEncodingType(ENCODING_TYPE); response.setTruncated(false); @@ -187,6 +191,8 @@ public Response list( response.setTruncated(true); ContinueToken nextToken = new ContinueToken(lastKey, prevDir); response.setNextToken(nextToken.encodeToString()); + // Set nextMarker to be lastKey. for the compatibility of aws api v1 + response.setNextMarker(lastKey); } else { response.setTruncated(false); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java index adb5f20e3014..fb707b174dc0 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ListObjectResponse.java @@ -63,6 +63,9 @@ public class ListObjectResponse { @XmlElement(name = "NextContinuationToken") private String nextToken; + @XmlElement(name = "NextMarker") + private String nextMarker; + @XmlElement(name = "continueToken") private String continueToken; @@ -177,4 +180,12 @@ public int getKeyCount() { public void setKeyCount(int keyCount) { this.keyCount = keyCount; } + + public void setNextMarker(String nextMarker) { + this.nextMarker = nextMarker; + } + + public String getNextMarker() { + return nextMarker; + } } From 51ffc824413715c6afa66bc14899229e3ecbea34 Mon Sep 17 00:00:00 2001 From: avijayanhwx <14299376+avijayanhwx@users.noreply.github.com> Date: Mon, 23 Nov 2020 16:05:48 -0800 Subject: [PATCH 32/51] HDDS-4497. Recon File Size Count task throws SQL Exception. (#1612) --- .../ozone/recon/persistence/DefaultDataSourceProvider.java | 6 ++++++ .../ozone/recon/persistence/DerbyDataSourceProvider.java | 1 - .../apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java | 4 +++- .../recon/persistence/TestReconWithDifferentSqlDBs.java | 5 +++++ .../hadoop/ozone/recon/tasks/TestFileSizeCountTask.java | 6 ++++++ 5 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java index 42cde7d149d5..24c92c77a17b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DefaultDataSourceProvider.java @@ -20,6 +20,8 @@ import javax.sql.DataSource; import org.apache.commons.lang3.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import com.google.inject.Inject; import com.google.inject.Provider; @@ -30,6 +32,9 @@ */ public class DefaultDataSourceProvider implements Provider { + private static final Logger LOG = + LoggerFactory.getLogger(DefaultDataSourceProvider.class); + @Inject private DataSourceConfiguration configuration; @@ -43,6 +48,7 @@ public class DefaultDataSourceProvider implements Provider { @Override public DataSource get() { String jdbcUrl = configuration.getJdbcUrl(); + LOG.info("JDBC Url for Recon : {} ", jdbcUrl); if (StringUtils.contains(jdbcUrl, "derby")) { return new DerbyDataSourceProvider(configuration).get(); } else if (StringUtils.contains(jdbcUrl, "sqlite")) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java index 51678c011675..facb74e9fbda 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/persistence/DerbyDataSourceProvider.java @@ -47,7 +47,6 @@ public class DerbyDataSourceProvider implements Provider { @Override public DataSource get() { String jdbcUrl = configuration.getJdbcUrl(); - LOG.info("JDBC Url for Recon : {} ", jdbcUrl); try { createNewDerbyDatabase(jdbcUrl, RECON_SCHEMA_NAME); } catch (Exception e) { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java index 7092c548d949..e0a592ba59f3 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java @@ -96,7 +96,9 @@ public Pair reprocess(OMMetadataManager omMetadataManager) { return new ImmutablePair<>(getTaskName(), false); } // Truncate table before inserting new rows - dslContext.truncate(FILE_COUNT_BY_SIZE); + int execute = dslContext.delete(FILE_COUNT_BY_SIZE).execute(); + LOG.info("Deleted {} records from {}", execute, FILE_COUNT_BY_SIZE); + writeCountsToDB(true, fileSizeCountMap); LOG.info("Completed a 'reprocess' run of FileSizeCountTask."); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java index 12b9659cd5fd..0e096623eb5f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java @@ -20,6 +20,7 @@ import static java.util.stream.Collectors.toList; import static org.apache.hadoop.ozone.recon.ReconControllerModule.ReconDaoBindingModule.RECON_DAO_LIST; import static org.hadoop.ozone.recon.codegen.SqlDbUtils.SQLITE_DRIVER_CLASS; +import static org.hadoop.ozone.recon.schema.Tables.RECON_TASK_STATUS; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; @@ -76,6 +77,10 @@ public void testSchemaSetup() throws SQLException { ReconTaskStatusDao dao = getDao(ReconTaskStatusDao.class); dao.insert(new ReconTaskStatus("TestTask", 1L, 2L)); assertEquals(1, dao.findAll().size()); + + int numRows = getDslContext().delete(RECON_TASK_STATUS).execute(); + assertEquals(1, numRows); + assertEquals(0, dao.findAll().size()); } /** diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java index 1cfc0ad8939a..95aa52b66b4f 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMUpdateEventBuilder; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao; +import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize; import org.jooq.DSLContext; import org.jooq.Record3; import org.junit.Before; @@ -111,6 +112,11 @@ public void testReprocess() throws IOException { .thenReturn(omKeyInfo2) .thenReturn(omKeyInfo3); + // Reprocess could be called from table having existing entries. Adding + // an entry to simulate that. + fileCountBySizeDao.insert( + new FileCountBySize("vol1", "bucket1", 1024L, 10L)); + Pair result = fileSizeCountTask.reprocess(omMetadataManager); assertTrue(result.getRight()); From 1b2f2efd65f62dc0f46ffe0cb7d10e0af0897e41 Mon Sep 17 00:00:00 2001 From: Mukul Kumar Singh Date: Tue, 24 Nov 2020 20:11:33 +0530 Subject: [PATCH 33/51] HDDS-3689. Add various profiles to MiniOzoneChaosCluster to run different modes. (#1420) --- .../src/test/bin/start-chaos.sh | 2 +- .../hadoop/ozone/MiniOzoneChaosCluster.java | 5 +- .../hadoop/ozone/MiniOzoneLoadGenerator.java | 8 +- .../hadoop/ozone/OzoneChaosCluster.java | 47 ++++++++ .../ozone/TestAllMiniChaosOzoneCluster.java | 55 ++++++++++ .../TestDatanodeMiniChaosOzoneCluster.java | 53 ++++++++++ .../ozone/TestMiniChaosOzoneCluster.java | 100 ++++++++---------- ...TestOzoneManagerMiniChaosOzoneCluster.java | 57 ++++++++++ .../hadoop/ozone/failure/FailureManager.java | 3 +- .../apache/hadoop/ozone/failure/Failures.java | 13 +++ .../loadgenerators/AgedDirLoadGenerator.java | 1 - .../loadgenerators/AgedLoadGenerator.java | 6 +- .../FilesystemLoadGenerator.java | 1 - .../{utils => loadgenerators}/LoadBucket.java | 2 +- .../ozone/loadgenerators/LoadGenerator.java | 17 +++ .../NestedDirLoadGenerator.java | 1 - .../RandomDirLoadGenerator.java | 1 - .../loadgenerators/RandomLoadGenerator.java | 1 - .../loadgenerators/ReadOnlyLoadGenerator.java | 1 - .../hadoop/ozone/utils/TestProbability.java | 43 -------- .../src/test/resources/log4j.properties | 2 +- 21 files changed, 297 insertions(+), 122 deletions(-) create mode 100644 hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java create mode 100644 hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java create mode 100644 hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java create mode 100644 hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java rename hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/{utils => loadgenerators}/LoadBucket.java (99%) delete mode 100644 hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/TestProbability.java diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/bin/start-chaos.sh b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/bin/start-chaos.sh index c02fa9622230..d3f71f09b527 100755 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/bin/start-chaos.sh +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/bin/start-chaos.sh @@ -46,7 +46,7 @@ echo "logging chaos logs and heapdump to ${logfiledirectory}" echo "Starting MiniOzoneChaosCluster with ${MVN_OPTS}" mvn clean install -DskipTests > "${compilefilename}" 2>&1 mvn exec:java \ - -Dexec.mainClass="org.apache.hadoop.ozone.TestMiniChaosOzoneCluster" \ + -Dexec.mainClass="org.apache.hadoop.ozone.OzoneChaosCluster" \ -Dexec.classpathScope=test \ -Dchaoslogfilename=${chaosfilename} \ -Dproblemlogfilename=${problemfilename} \ diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java index f0dfba88e01f..4401737ce807 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java @@ -20,7 +20,6 @@ import java.io.IOException; import java.time.Duration; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -94,7 +93,7 @@ public static FailureService of(String serviceName) { public MiniOzoneChaosCluster(OzoneConfiguration conf, List ozoneManagers, StorageContainerManager scm, List hddsDatanodes, String omServiceID, - List> clazzes) { + Set> clazzes) { super(conf, ozoneManagers, scm, hddsDatanodes, omServiceID); this.numDatanodes = getHddsDatanodes().size(); this.numOzoneManagers = ozoneManagers.size(); @@ -150,7 +149,7 @@ public void waitForClusterToBeReady() */ public static class Builder extends MiniOzoneHAClusterImpl.Builder { - private final List> clazzes = new ArrayList<>(); + private final Set> clazzes = new HashSet<>(); /** * Creates a new Builder. diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java index b7549ca5340e..437ec46afb2c 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneLoadGenerator.java @@ -23,12 +23,14 @@ import org.apache.hadoop.ozone.loadgenerators.DataBuffer; import org.apache.hadoop.ozone.loadgenerators.LoadExecutors; import org.apache.hadoop.ozone.loadgenerators.LoadGenerator; -import org.apache.hadoop.ozone.utils.LoadBucket; +import org.apache.hadoop.ozone.loadgenerators.LoadBucket; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import java.util.concurrent.TimeUnit; /** @@ -48,7 +50,7 @@ public class MiniOzoneLoadGenerator { MiniOzoneLoadGenerator(OzoneVolume volume, int numThreads, int numBuffers, OzoneConfiguration conf, String omServiceId, - List> loadGenratorClazzes) + Set> loadGenratorClazzes) throws Exception { DataBuffer buffer = new DataBuffer(numBuffers); loadGenerators = new ArrayList<>(); @@ -92,7 +94,7 @@ void shutdownLoadGenerator() { * Builder to create Ozone load generator. */ public static class Builder { - private List> clazzes = new ArrayList<>(); + private Set> clazzes = new HashSet<>(); private String omServiceId; private OzoneConfiguration conf; private int numBuffers; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java new file mode 100644 index 000000000000..8c258270c3e9 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/OzoneChaosCluster.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import picocli.CommandLine; + +/** + * Main driver class for Ozone Chaos Cluster + * This has multiple sub implementations of chaos cluster as options. + */ +@CommandLine.Command( + name = "chaos", + description = "Starts IO with MiniOzoneChaosCluster", + subcommands = { + TestAllMiniChaosOzoneCluster.class, + TestDatanodeMiniChaosOzoneCluster.class, + TestOzoneManagerMiniChaosOzoneCluster.class + }, + versionProvider = HddsVersionProvider.class, + mixinStandardHelpOptions = true) +public class OzoneChaosCluster extends GenericCli { + @Override + public void execute(String[] argv) { + super.execute(argv); + } + + public static void main(String[] args) { + new OzoneChaosCluster().run(args); + } +} diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java new file mode 100644 index 000000000000..ea8c15503e45 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestAllMiniChaosOzoneCluster.java @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.ozone.failure.Failures; +import org.apache.hadoop.ozone.loadgenerators.*; +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Command line utility to parse and dump a datanode ratis segment file. + */ +@CommandLine.Command( + name = "all", + description = "run chaos cluster across all daemons", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class TestAllMiniChaosOzoneCluster extends TestMiniChaosOzoneCluster + implements Callable { + + @CommandLine.ParentCommand + private OzoneChaosCluster chaosCluster; + + @Override + public Void call() throws Exception { + setNumOzoneManagers(3, true); + + LoadGenerator.getClassList().forEach( + TestMiniChaosOzoneCluster::addLoadClasses); + Failures.getClassList().forEach( + TestMiniChaosOzoneCluster::addFailureClasses); + + startChaosCluster(); + + return null; + } + +} diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java new file mode 100644 index 000000000000..d3f2b2d4c2cd --- /dev/null +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestDatanodeMiniChaosOzoneCluster.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.ozone.failure.Failures; +import org.apache.hadoop.ozone.loadgenerators.RandomLoadGenerator; +import org.apache.hadoop.ozone.loadgenerators.AgedLoadGenerator; + +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Test Datanode with Chaos. + */ +@CommandLine.Command( + name = "datanode", + description = "run chaos cluster across Ozone Datanodes", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class TestDatanodeMiniChaosOzoneCluster extends + TestMiniChaosOzoneCluster implements Callable { + + @Override + public Void call() throws Exception { + addLoadClasses(RandomLoadGenerator.class); + addLoadClasses(AgedLoadGenerator.class); + + addFailureClasses(Failures.DatanodeStartStopFailure.class); + addFailureClasses(Failures.DatanodeRestartFailure.class); + + startChaosCluster(); + + return null; + } + +} diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java index fdb4aaf32d5b..e2c059d2c5ba 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestMiniChaosOzoneCluster.java @@ -22,24 +22,17 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.MiniOzoneChaosCluster.FailureService; import org.apache.hadoop.ozone.failure.Failures; -import org.apache.hadoop.ozone.loadgenerators.RandomLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.ReadOnlyLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.FilesystemLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.AgedLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.AgedDirLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.RandomDirLoadGenerator; -import org.apache.hadoop.ozone.loadgenerators.NestedDirLoadGenerator; +import org.apache.hadoop.ozone.loadgenerators.LoadGenerator; import org.junit.BeforeClass; import org.junit.AfterClass; import org.junit.Ignore; import org.junit.Test; import picocli.CommandLine.Command; import picocli.CommandLine.Option; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; +import java.util.ArrayList; +import java.util.List; import java.util.concurrent.TimeUnit; /** @@ -49,8 +42,12 @@ @Command(description = "Starts IO with MiniOzoneChaosCluster", name = "chaos", mixinStandardHelpOptions = true) public class TestMiniChaosOzoneCluster extends GenericCli { - static final Logger LOG = - LoggerFactory.getLogger(TestMiniChaosOzoneCluster.class); + + private static List> failureClasses + = new ArrayList<>(); + + private static List> loadClasses + = new ArrayList<>(); @Option(names = {"-d", "--num-datanodes", "--numDatanodes"}, description = "num of datanodes. Full name --numDatanodes will be" + @@ -62,12 +59,6 @@ public class TestMiniChaosOzoneCluster extends GenericCli { " be removed in later versions.") private static int numOzoneManagers = 1; - @Option(names = {"-s", "--failure-service", "--failureService"}, - description = "service (datanode or ozoneManager) to test chaos on. " + - "Full --failureService name will be removed in later versions.", - defaultValue = "datanode") - private static String failureService = "datanode"; - @Option(names = {"-t", "--num-threads", "--numThreads"}, description = "num of IO threads. Full name --numThreads will be" + " removed in later versions.") @@ -96,41 +87,25 @@ public class TestMiniChaosOzoneCluster extends GenericCli { private static MiniOzoneChaosCluster cluster; private static MiniOzoneLoadGenerator loadGenerator; + private static String omServiceId = null; + private static final String OM_SERVICE_ID = "ozoneChaosTest"; @BeforeClass public static void init() throws Exception { OzoneConfiguration configuration = new OzoneConfiguration(); - FailureService service = FailureService.of(failureService); - String omServiceID; - MiniOzoneChaosCluster.Builder builder = + MiniOzoneChaosCluster.Builder chaosBuilder = new MiniOzoneChaosCluster.Builder(configuration); - switch (service) { - case DATANODE: - omServiceID = null; - builder - .addFailures(Failures.DatanodeRestartFailure.class) - .addFailures(Failures.DatanodeStartStopFailure.class); - break; - case OZONE_MANAGER: - omServiceID = OM_SERVICE_ID; - builder - .addFailures(Failures.OzoneManagerStartStopFailure.class) - .addFailures(Failures.OzoneManagerRestartFailure.class); - break; - default: - throw new IllegalArgumentException(); - } - - builder + chaosBuilder .setNumDatanodes(numDatanodes) .setNumOzoneManagers(numOzoneManagers) - .setOMServiceID(omServiceID) + .setOMServiceID(omServiceId) .setNumDataVolumes(numDataVolumes); + failureClasses.forEach(chaosBuilder::addFailures); - cluster = builder.build(); + cluster = chaosBuilder.build(); cluster.waitForClusterToBeReady(); String volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase(); @@ -138,20 +113,35 @@ public static void init() throws Exception { store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - loadGenerator = new MiniOzoneLoadGenerator.Builder() + MiniOzoneLoadGenerator.Builder loadBuilder = + new MiniOzoneLoadGenerator.Builder() .setVolume(volume) .setConf(configuration) .setNumBuffers(numBuffers) .setNumThreads(numThreads) - .setOMServiceId(omServiceID) - .addLoadGenerator(RandomLoadGenerator.class) - .addLoadGenerator(AgedLoadGenerator.class) - .addLoadGenerator(FilesystemLoadGenerator.class) - .addLoadGenerator(ReadOnlyLoadGenerator.class) - .addLoadGenerator(RandomDirLoadGenerator.class) - .addLoadGenerator(AgedDirLoadGenerator.class) - .addLoadGenerator(NestedDirLoadGenerator.class) - .build(); + .setOMServiceId(omServiceId); + loadClasses.forEach(loadBuilder::addLoadGenerator); + loadGenerator = loadBuilder.build(); + } + + static void addFailureClasses(Class clz) { + failureClasses.add(clz); + } + + static void addLoadClasses(Class clz) { + loadClasses.add(clz); + } + + static void setNumDatanodes(int nDns) { + numDatanodes = nDns; + } + + static void setNumOzoneManagers(int nOms, boolean enableHA) { + + if (nOms > 1 || enableHA) { + omServiceId = OM_SERVICE_ID; + } + numOzoneManagers = nOms; } /** @@ -168,8 +158,7 @@ public static void shutdown() { } } - @Override - public Void call() throws Exception { + public void startChaosCluster() throws Exception { try { init(); cluster.startChaos(failureInterval, failureInterval, TimeUnit.SECONDS); @@ -177,11 +166,6 @@ public Void call() throws Exception { } finally { shutdown(); } - return null; - } - - public static void main(String... args) { - new TestMiniChaosOzoneCluster().run(args); } @Test diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java new file mode 100644 index 000000000000..c8fbed312608 --- /dev/null +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/TestOzoneManagerMiniChaosOzoneCluster.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + +import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.ozone.failure.Failures; +import org.apache.hadoop.ozone.loadgenerators.AgedDirLoadGenerator; +import org.apache.hadoop.ozone.loadgenerators.RandomDirLoadGenerator; +import org.apache.hadoop.ozone.loadgenerators.NestedDirLoadGenerator; + +import picocli.CommandLine; + +import java.util.concurrent.Callable; + +/** + * Chaos cluster for Ozone Manager. + */ +@CommandLine.Command( + name = "ozonemanager", + description = "run chaos cluster across Ozone Managers", + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class) +public class TestOzoneManagerMiniChaosOzoneCluster extends + TestMiniChaosOzoneCluster implements Callable { + + @Override + public Void call() throws Exception { + setNumOzoneManagers(3, true); + setNumDatanodes(3); + + addLoadClasses(AgedDirLoadGenerator.class); + addLoadClasses(RandomDirLoadGenerator.class); + addLoadClasses(NestedDirLoadGenerator.class); + + addFailureClasses(Failures.OzoneManagerRestartFailure.class); + addFailureClasses(Failures.OzoneManagerStartStopFailure.class); + + startChaosCluster(); + return null; + } + +} diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java index 15aa7f0a5a59..72fbb47bc287 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/FailureManager.java @@ -27,6 +27,7 @@ import java.util.ArrayList; import java.util.List; +import java.util.Set; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.ScheduledFuture; @@ -46,7 +47,7 @@ public class FailureManager { private final ScheduledExecutorService executorService; public FailureManager(MiniOzoneChaosCluster cluster, Configuration conf, - List> clazzes) { + Set> clazzes) { this.cluster = cluster; this.executorService = Executors.newSingleThreadScheduledExecutor(); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java index 6d226ca3b33b..604fcffc8896 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/failure/Failures.java @@ -25,6 +25,8 @@ import org.slf4j.LoggerFactory; import java.util.Set; +import java.util.List; +import java.util.ArrayList; /** * Implementation of all the failures. @@ -41,6 +43,17 @@ public String getName() { public abstract void validateFailure(MiniOzoneChaosCluster cluster); + public static List> getClassList() { + List> classList = new ArrayList<>(); + + classList.add(OzoneManagerRestartFailure.class); + classList.add(OzoneManagerStartStopFailure.class); + classList.add(DatanodeRestartFailure.class); + classList.add(DatanodeStartStopFailure.class); + + return classList; + } + /** * Ozone Manager failures. */ diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java index f4ab9302a044..fb585ef6655d 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedDirLoadGenerator.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; /** * A load generator where directories are read multiple times. diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java index ecd6076a4f53..8cb8f3ffca52 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/AgedLoadGenerator.java @@ -19,8 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; -import org.apache.hadoop.ozone.utils.TestProbability; import java.nio.ByteBuffer; import java.util.Optional; @@ -38,7 +36,6 @@ public class AgedLoadGenerator extends LoadGenerator { private final AtomicInteger agedFileWrittenIndex; private final AtomicInteger agedFileAllocationIndex; private final LoadBucket agedLoadBucket; - private final TestProbability agedWriteProbability; private final DataBuffer dataBuffer; public AgedLoadGenerator(DataBuffer data, LoadBucket agedLoadBucket) { @@ -46,12 +43,11 @@ public AgedLoadGenerator(DataBuffer data, LoadBucket agedLoadBucket) { this.agedFileWrittenIndex = new AtomicInteger(0); this.agedFileAllocationIndex = new AtomicInteger(0); this.agedLoadBucket = agedLoadBucket; - this.agedWriteProbability = TestProbability.valueOf(10); } @Override public void generateLoad() throws Exception { - if (agedWriteProbability.isTrue()) { + if (RandomUtils.nextInt(0, 100) <= 10) { synchronized (agedFileAllocationIndex) { int index = agedFileAllocationIndex.getAndIncrement(); ByteBuffer buffer = dataBuffer.getBuffer(index); diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java index e6cb7e5a00a4..a5f98aa2a29f 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/FilesystemLoadGenerator.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; import java.nio.ByteBuffer; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/LoadBucket.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java similarity index 99% rename from hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/LoadBucket.java rename to hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java index 51c344fba5f5..c6ccb3a88134 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/LoadBucket.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadBucket.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.utils; +package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.fs.FileStatus; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java index 7f79df58ad02..9eaa16677469 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/LoadGenerator.java @@ -18,10 +18,27 @@ package org.apache.hadoop.ozone.loadgenerators; +import java.util.ArrayList; +import java.util.List; + /** * Interface for load generator. */ public abstract class LoadGenerator { + public static List> getClassList() { + List> classList = new ArrayList<>(); + + classList.add(AgedDirLoadGenerator.class); + classList.add(AgedLoadGenerator.class); + classList.add(FilesystemLoadGenerator.class); + classList.add(NestedDirLoadGenerator.class); + classList.add(RandomDirLoadGenerator.class); + classList.add(RandomLoadGenerator.class); + classList.add(ReadOnlyLoadGenerator.class); + + return classList; + } + /* * The implemented LoadGenerators constructors should have the * constructor with the signature as following diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java index ded85a7ddf22..6ca1900b6abd 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/NestedDirLoadGenerator.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java index 8eaba654d5ac..029148eeae79 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomDirLoadGenerator.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; /** * A simple directory based load generator. diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java index 7d856ac85517..354d93ab5adb 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/RandomLoadGenerator.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java index 839780081092..45fffff0e4d7 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/loadgenerators/ReadOnlyLoadGenerator.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.loadgenerators; import org.apache.commons.lang3.RandomUtils; -import org.apache.hadoop.ozone.utils.LoadBucket; import java.nio.ByteBuffer; diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/TestProbability.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/TestProbability.java deleted file mode 100644 index bd79115c83bd..000000000000 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/utils/TestProbability.java +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.utils; - -import com.google.common.base.Preconditions; -import org.apache.commons.lang3.RandomUtils; - -/** - * This class is used to find out if a certain event is true. - * Every event is assigned a propbability and the isTrue function returns true - * when the probability has been met. - */ -final public class TestProbability { - private int pct; - - private TestProbability(int pct) { - Preconditions.checkArgument(pct <= 100 && pct > 0); - this.pct = pct; - } - - public boolean isTrue() { - return (RandomUtils.nextInt(0, 100) <= pct); - } - - public static TestProbability valueOf(int pct) { - return new TestProbability(pct); - } -} diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties index aabb0b1e4a1e..9eebeaea927e 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/log4j.properties @@ -24,7 +24,7 @@ log4j.logger.org.apache.hadoop.util.NativeCodeLoader=ERROR log4j.logger.org.apache.ratis.grpc.client.GrpcClientProtocolClient=WARN log4j.logger.org.apache.hadoop.ozone.utils=DEBUG,stdout,CHAOS -log4j.logger.org.apache.hadoop.ozone.loadgenerators=DEBUG,stdout,CHAOS +log4j.logger.org.apache.hadoop.ozone.loadgenerators=WARN,stdout,CHAOS log4j.logger.org.apache.hadoop.ozone.failure=INFO, CHAOS log4j.appender.CHAOS.File=${chaoslogfilename} log4j.appender.CHAOS=org.apache.log4j.FileAppender From a9ff68ab7133c63f5076533669d46d0ba6921d12 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Wed, 25 Nov 2020 03:10:16 +0530 Subject: [PATCH 34/51] HDDS-4492. CLI flag --quota should default to 'spaceQuota' to preserve backward compatibility. (#1609) --- .../hadoop/ozone/shell/TestOzoneShellHA.java | 42 +++++++++++++++++++ .../ozone/shell/SetSpaceQuotaOptions.java | 3 +- 2 files changed, 44 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java index 513049d3a441..830a3d652f99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java @@ -20,6 +20,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileNotFoundException; +import java.io.IOException; import java.io.PrintStream; import java.util.Arrays; import java.util.List; @@ -36,6 +37,7 @@ import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.test.GenericTestUtils; @@ -48,6 +50,8 @@ import org.junit.After; import org.junit.AfterClass; import org.junit.Assert; + +import static org.junit.Assert.assertEquals; import static org.junit.Assert.fail; import org.junit.Before; import org.junit.BeforeClass; @@ -525,4 +529,42 @@ public void testDeleteToTrashOrSkipTrash() throws Exception { } } + @Test + public void testShQuota() throws IOException { + ObjectStore objectStore = cluster.getClient().getObjectStore(); + try { + // Test --quota option. + + String[] args = + new String[] {"volume", "create", "vol1", "--quota", "100BYTES"}; + execute(ozoneShell, args); + assertEquals(100, objectStore.getVolume("vol1").getQuotaInBytes()); + out.reset(); + + args = + new String[] {"bucket", "create", "vol1/buck1", "--quota", "10BYTES"}; + execute(ozoneShell, args); + assertEquals(10, + objectStore.getVolume("vol1").getBucket("buck1").getQuotaInBytes()); + + // Test --space-quota option. + + args = new String[] {"volume", "create", "vol2", "--space-quota", + "100BYTES"}; + execute(ozoneShell, args); + assertEquals(100, objectStore.getVolume("vol2").getQuotaInBytes()); + out.reset(); + + args = new String[] {"bucket", "create", "vol2/buck2", "--space-quota", + "10BYTES"}; + execute(ozoneShell, args); + assertEquals(10, + objectStore.getVolume("vol2").getBucket("buck2").getQuotaInBytes()); + } finally { + objectStore.getVolume("vol1").deleteBucket("buck1"); + objectStore.deleteVolume("vol1"); + objectStore.getVolume("vol2").deleteBucket("buck2"); + objectStore.deleteVolume("vol2"); + } + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java index 364efc5fb406..8dea3a9b0afc 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java @@ -24,7 +24,8 @@ */ public class SetSpaceQuotaOptions { - @CommandLine.Option(names = {"--space-quota"}, + // Added --quota for backward compatibility. + @CommandLine.Option(names = {"--space-quota", "--quota"}, description = "The maximum space quota can be used (eg. 1GB)") private String quotaInBytes; From 4b69f08a8ba40c1bba19fc779f17244ebb48aade Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Tue, 24 Nov 2020 14:35:11 -0800 Subject: [PATCH 35/51] HDDS-4501. Reload OM State fail should terminate OM for any exceptions. (#1622) --- .../main/java/org/apache/hadoop/ozone/om/OzoneManager.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 2d2c9f468a25..6229beffa265 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -3324,7 +3324,7 @@ TermIndex installCheckpoint(String leaderId, Path checkpointLocation, omRatisServer.getOmStateMachine().unpause(lastAppliedIndex, term); LOG.info("Reloaded OM state with Term: {} and Index: {}", term, lastAppliedIndex); - } catch (IOException ex) { + } catch (Exception ex) { String errorMsg = "Failed to reload OM state and instantiate services."; exitManager.exitSystem(1, errorMsg, ex, LOG); } @@ -3334,7 +3334,7 @@ TermIndex installCheckpoint(String leaderId, Path checkpointLocation, if (dbBackup != null) { FileUtils.deleteFully(dbBackup); } - } catch (IOException e) { + } catch (Exception e) { LOG.error("Failed to delete the backup of the original DB {}", dbBackup); } From 1a304ba81c9d52e2fb3a67e669d08c729a4113ae Mon Sep 17 00:00:00 2001 From: Vivek Ratnavel Subramanian Date: Tue, 24 Nov 2020 16:27:12 -0800 Subject: [PATCH 36/51] HDDS-4392. [DOC] Add Recon architecture to docs (#1602) --- .../docs/content/concept/OzoneManager.md | 4 +- hadoop-hdds/docs/content/concept/Recon.md | 163 ++++++ .../content/concept/ReconHighLevelDesign.png | Bin 0 -> 239168 bytes .../docs/content/concept/ReconOmDesign.png | Bin 0 -> 162797 bytes .../docs/content/concept/ReconScmDesign.png | Bin 0 -> 181628 bytes .../concept/StorageContainerManager.md | 5 +- hadoop-hdds/docs/content/feature/Recon.md | 26 +- .../docs/content/interface/ReconApi.md | 511 ++++++++++++++++++ .../ozone/recon/tasks/ReconTaskConfig.java | 4 +- 9 files changed, 690 insertions(+), 23 deletions(-) create mode 100644 hadoop-hdds/docs/content/concept/Recon.md create mode 100644 hadoop-hdds/docs/content/concept/ReconHighLevelDesign.png create mode 100644 hadoop-hdds/docs/content/concept/ReconOmDesign.png create mode 100644 hadoop-hdds/docs/content/concept/ReconScmDesign.png create mode 100644 hadoop-hdds/docs/content/interface/ReconApi.md diff --git a/hadoop-hdds/docs/content/concept/OzoneManager.md b/hadoop-hdds/docs/content/concept/OzoneManager.md index 0930ec95e380..5cf520ca2195 100644 --- a/hadoop-hdds/docs/content/concept/OzoneManager.md +++ b/hadoop-hdds/docs/content/concept/OzoneManager.md @@ -97,7 +97,7 @@ the data from the data node. For a detailed view of Ozone Manager this section gives a quick overview about the provided network services and the stored persisted data. -**Network services provided by Ozone Manager:** +### Network services provided by Ozone Manager: Ozone provides a network service for the client and for administration commands. The main service calls @@ -115,7 +115,7 @@ Ozone provides a network service for the client and for administration commands. * ServiceList (used for service discovery) * DBUpdates (used by [Recon]({{< ref "feature/Recon.md" >}}) downloads snapshots) -**Persisted state** +### Persisted state The following data is persisted in Ozone Manager side in a specific RocksDB directory: diff --git a/hadoop-hdds/docs/content/concept/Recon.md b/hadoop-hdds/docs/content/concept/Recon.md new file mode 100644 index 000000000000..902c865be8fa --- /dev/null +++ b/hadoop-hdds/docs/content/concept/Recon.md @@ -0,0 +1,163 @@ +--- +title: "Recon" +date: "2020-10-27" +weight: 8 +menu: + main: + parent: Architecture +summary: Recon serves as a management and monitoring console for Ozone. +--- + + +Recon serves as a management and monitoring console for Ozone. It gives a +bird's-eye view of Ozone and helps users troubleshoot any issues by presenting +the current state of the cluster through REST based APIs and rich web UI. + + +## High Level Design + +{{

}} + +
+ +On a high level, Recon collects and aggregates metadata from Ozone Manager (OM), +Storage Container Manager (SCM) and Datanodes (DN) and acts as a central +management and monitoring console. Ozone administrators can use Recon to query +the current state of the system without overloading OM or SCM. + +Recon maintains multiple databases to enable batch processing, faster querying +and to persist aggregate information. It maintains a local copy of OM db and +SCM db along with a SQL database for persisting aggregate information. + +Recon also integrates with Prometheus to provide a HTTP endpoint to query Prometheus +for Ozone metrics and also to display a few crucial point in time metrics in +the web UI. + +## Recon and Ozone Manager + +{{
}} + +
+ +Recon gets a full snapshot of OM rocks db initially from the leader OM's HTTP +endpoint, untars the file and initializes RocksDB for querying locally. The +database is kept in sync by periodically requesting delta updates from the leader +OM via RPC calls from the last applied sequence id. If for any reason, the delta +updates could not be retrieved or applied to the local db, a full snapshot is +requested again to keep the local db in sync with OM db. Due to this, Recon can +show stale information since the local db will not always be in sync. + +The db updates retrieved from OM is then converted into a batch of events for +further processing by OM db tasks via [Recon Task Framework](#task-framework). + + +## Recon and Storage Container Manager + +{{
}} + +
+ +Recon also acts as a passive SCM for datanodes. When Recon is configured in the +cluster, all the datanodes register with Recon and send heartbeats, container +reports, incremental container reports etc. to Recon similar to SCM. Recon uses +all the information it gets from datanodes to construct its own copy of SCM rocks db +locally. Recon never sends any command to datanodes in response and just acts as +a passive SCM for faster lookup of SCM metadata. + +## Task Framework + +Recon has its own Task framework to enable batch processing of data obtained +from OM and SCM. A task can listen to and act upon db events such as `PUT`, `DELETE`, +`UPDATE`, etc. on either OM db or SCM db. Based on this, a task either implements +`org.apache.hadoop.ozone.recon.tasks.ReconOmTask` or extends +`org.apache.hadoop.ozone.recon.scm.ReconScmTask`. + +An example `ReconOmTask` is `ContainerKeyMapperTask` that persists the container -> key +mapping in RocksDB. This is useful to understand which keys were part of the container +when the container is reported missing or is in a bad health state. Another example is +`FileSizeCountTask` which keeps track of count of files within a given file size range in +a SQL database. These tasks have implementations for two scenarios: + + - Full snapshot (reprocess()) + - Delta updates (process()) + +When a full snapshot of OM db is obtained from the leader OM, the reprocess() +is called on all the registered OM tasks. On subsequent delta updates, process() +is called on these OM tasks. + +An example `ReconScmTask` is `ContainerHealthTask` that runs in configurable +intervals to scan the list of all the containers and to persist the state of +unhealthy containers (`MISSING`, `MIS_REPLICATED`, `UNDER_REPLICATED`, `OVER_REPLICATED`) +in a SQL table. This information is used to determine if there are any missing +containers in the cluster. + +## Recon and Prometheus + +Recon can integrate with any Prometheus instance configured to collected metrics +and can display useful information in Recon UI in Datanodes and Pipelines pages. +Recon also exposes a proxy endpoint ([/metrics]({{< ref "interface/ReconApi.md#metrics" >}})) +to query Prometheus. This integration can be enabled by setting this configuration `ozone.recon.prometheus.http.endpoint` +to the Prometheus endpoint like `ozone.recon.prometheus.http.endpoint=localhost:9090`. + +## API Reference + +[Link to complete API Reference]({{< ref "interface/ReconApi.md" >}}) + +## Persisted state + + * A local copy of [OM database]({{< ref "concept/OzoneManager.md#persisted-state" >}}) + * A local copy of [SCM database]({{< ref "concept/StorageContainerManager.md#persisted-state" >}}) + * The following data is persisted in Recon in the specified RocksDB directory: + * ContainerKey table + * Stores the mapping (container, key) -> count + * ContainerKeyCount table + * Stores containerID -> no. of keys count within the container + + * The following data is stored in the configured SQL database (default is Derby): + * GlobalStats table + * A Key -> Value table to store aggregate information like total + number of volumes / buckets / keys present in the cluster + * FileCountBySize table + * Keeps track of the number of files present within a file size range in the cluster + * ReconTaskStatus table + * Keeps track of the status and last run timestamp of the registered OM and SCM + db tasks in the [Recon Task Framework](#task-framework) + * ContainerHistory table + * Stores ContainerReplica -> Datanode mapping with last known timestamp. This + is used to determine the last known datanodes when a container is reported missing + * UnhealthyContainers table + * Keeps track of all the Unhealthy Containers (MISSING, UNDER_REPLICATED, + OVER_REPLICATED, MIS_REPLICATED) in the cluster at any given time + + +## Notable configurations + +key | default |
description
+----|---------|------------ +ozone.recon.http-address | 0.0.0.0:9888 | The address and the base port where the Recon web UI will listen on. +ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. +ozone.recon.db.dir | none | Directory where the Recon Server stores its metadata. +ozone.recon.om.db.dir | none | Directory where the Recon Server stores its OM snapshot DB. +ozone.recon.om.snapshot
.task.interval.delay | 10m | Interval in MINUTES by Recon to request OM DB Snapshot / delta updates. +ozone.recon.task
.missingcontainer.interval | 300s | Time interval of the periodic check for Unhealthy Containers in the cluster. +ozone.recon.sql.db.jooq.dialect | DERBY | Please refer to [SQL Dialect](https://www.jooq.org/javadoc/latest/org.jooq/org/jooq/SQLDialect.html) to specify a different dialect. +ozone.recon.sql.db.jdbc.url | jdbc:derby:${ozone.recon.db.dir}
/ozone_recon_derby.db | Recon SQL database jdbc url. +ozone.recon.sql.db.username | none | Recon SQL database username. +ozone.recon.sql.db.password | none | Recon SQL database password. +ozone.recon.sql.db.driver | org.apache.derby.jdbc
.EmbeddedDriver | Recon SQL database jdbc driver. + diff --git a/hadoop-hdds/docs/content/concept/ReconHighLevelDesign.png b/hadoop-hdds/docs/content/concept/ReconHighLevelDesign.png new file mode 100644 index 0000000000000000000000000000000000000000..3bd6443d84c2c178868a5bd876ede5c26175d4db GIT binary patch literal 239168 zcmdqJby!v1`YsHJbV^BgN=SDoNJxhQQqmwL-LXJLQM#m&5b5qzR7$!VX{5X1jEV2N z&p!LN@jd6y@8h}_YqI8;BcAcZ{oK!3yirk-!9*iLgM)*^l#`WGgM&j3f`hwpf`SA- zF>~pug@eN|v6Pflk&~39QgN^|v$QsagOhy|9gD1`-bU!R6r*wL2^E6ceY-SSOe*>p zg!d$Js92~`>7s7q`M)ODd;GclEmI7E9)W%DAVF9;)vS>YzNZ1}C#F@S&#&4O!u$#v zcIuBCs&|)u#Bnbr{+JNH;irUbz|ir)8NM}gv19H_fHt0*nsx@VAKGm{gyp6<4wY7c z;$l^}n9m0(3;P6qpT^#@v*=78Tn?vk;4>q_QCWs@zd*Z3G0yd;_)Wla@`bw4?CmX()7)BaU_CFU~SS9@xHIEjzj$2+WWWjd*ekEmMxUCJmWT%ytF z)!>{ksE|>;sU}lm56Tno_Bh5YA?Q9S=)vw z2frnrR}hR%C)VkR#r~1oXQ#SI=fHDN_~t>~z0<;=uc&Qd*>Fz>5aiT&Ol(?_f)bBp z%dyNJ(Je}2|7wl4XSuYYDt*wm^|aiZl}(+3FsnCqff;G;+rzK7^vhn|+4+Ww`|Ip! zE#evcWiZw7n~cP-_k?M_z4-K5>qT0aiu0OQf^7V!H$=6x9oQlL@7BW?zCYWuFJa=> zrLXk2PGfpK{O()JeDk;Licp!h9b>!!pV^hsv*O}szFP=5H>;=+i~Md4VcaD!xgn?H z>dk+axq*7aiwap=0#TPI;xlgB4Nr|8}zCqZyb8q<;_(hZ$HGK zmd0s*9a4o#;dlB|b&D7=m$d7*#*E`t+JfpR#0N8^qu*|Oh3GY_u0 z7cr+TvW>^==PL0sEVem?3B;-yisuwq6e#8gFVqUY+dW##^Tk_IK{S}xO_#CHOpP*( zq6%srrY%n@6AjG!#C2loGtgj$VHzzwu)BXY!Lh^o!e)LreuTzad?<*#S;;W4$fM~? zh!N(9_F7R;;80HhjF(41XTnchuqR|b;rJ+C@KKyb)lij7wNK9;%uCg<;aPxvR=Zg% zz4ojZA+pa+Db{3-yAFsQor9eOaD)60&f`7W$c`RU{pe3I{gw}R)UtPYNO{8gmIZMJ zZbE*5D8=-^3O@BMoPeNaY825Y)2d(h{OoR!SEav$2^Jw?-KF85xp^jmgL2dO9)>I& z^HZ)LrMC;r1>a+t!H!cAqhJe9F}1My0`i|S9ucqI*q^}>K=MI0Z*qT5Jn>>R3s2;x z=nU_&B+iQ$J-A3l@3g2YUUSdBDhh|A!+&#sHVmJRyzPF*>sxUzStZ)TqFX&o(A9#^ z{K=$3sI$Uc=7k)HR_^OlZK`5qen9#}{46l!{wNc2uoPa8oCTo}Cfj|YH2Ge%GPKFy zp${!R+{>nAH@t%mjNtmeynj!>=wHmqP8g7B6!)GiAt2glL5tw=Ti!3eCp1GaXXDN1 zG+j;FRp`3SxNbD;O<6x3x9E>NycqQZD`$wdYu&If-bgUMzmhY^lFEH1 zMr%egszkrq?8vQnSvsya2U2!br0TCnzs7x~{~G*N^ecmr@U~cStP~A#WTMjC@%4h(zQx{>#8op> ziBYXkC1GA-N{SN8p%_g5rL350roNR&xvV7hdFCniRrX^K68+*Bn9lLly%f7)ti@f00dEX%Oge{{-tTAe)UJuUm5+)0 zXf(AmwMAvRr}=JMQBP6dR*@kxp79u``w!hku0^p$g2huqvu1&2sb)g#Tyht~5z_RZ z()Xlk($&&MrDKDqNK4;I20Omfygf`h&G%U`QQ>=n0{^Iq!}k^T9tOCV82MK$hcSeG43uq`l|L-moX+4v@!@Yyg*^p}_Q=54LSpwxlq+}|L zL*}Ggc9lLwW<`!<_pKyy|5{sMwVzs^5c9wL#r%bn)Zpt}b{?V-d6;}~$7J}Aa8=s3 zGDD$WeC=i$zdjM9Kaufv*mvr5o?P^BEW21PPJb)IAL8?Kah~CMu2b#2k}I}j%Ieh2 z#YFR{*$e!0Hh6yMa!6lq;y3Fb)g8vR2?D|3&hl6yLzoK&xfN?8_N^1Ro|My9{xwX}p3l3m`lb(+Yl8gi39i#pQVF2wW4i=m%T zaFW$v;A32&%cOtASTBbqryn=LX*lp=6t64eeTJ+WiJDxtT=uxSC*vz6UG8G;PVRiV z^>(^bxtEbjihOa2!pB?qNz7-=r~RlaZ7a2X;#_;q(;*KViPS1axUu#|IR_vOLl4a%^=>bgZ`)jh4enV6V8wSx+O6qvbePp=m-4UaWkqfi`ZH(bB{E=m&Q; z3rQ+-Y>dX)4Ga~_=!9KyrKBULvA(AnHEZC$J`Yta# zOn!!6q(6}HOfXTLY$*wuv`g;u+C6$P7yIOUWm(c|5&NCPqUq2|hbN)Qd*bfPQJdKT zS)*C2-EK+Q!mI8FO_a&LWEQ*6a)pIure`MP_S$a6T zIa$Y1b-4$d{T$f!5_<)^gHl^0$7k)}NJUd?_?Qtd#Yrq$bbXcmSbnn*vzsjW{KU0q z+T4?2&q*v+eABmhK6AT+yii1+_wf6{lzY-)&8Fgt&RnJNbn$d~gR;-%C%Y5mzQCZ+ z5K2<<2R=1}DZ|qoa~;PwWZ*F#^Kj$&5ed%-Qs0F8?(XLE{!_y)Ccc!nGC9>&ik3GA z8IwNTz!zy}-6cNwBBIB&?pv?1bOu#LsZmllHY6XZUte&$F+m{^<3V?Ur#^5IZUA609)jgyQ7EJA+Dz1sL*C%{fu` z;Txsfi9R$S?Fcf}k~33Ogku5MC~%1IBycyu6+C!}!IS>$S{j}S?#9*U2yk$LmT-uF zwNV1^&`%h6LC^etzY*~Y4jKG)3%uM?5&mwC9F%(F@9P^U;2zvPbxAon@UCv`U}|dX zXkq7+Y1e!RK0tjgtL+E}M@SF7;N{flwm|B3JMBx+~wln;$j0W*c{z$ zoebUBY#nKTck*{XQl^f^4wlcIEbVNmpneUF?3|rMX=tE<{`K>Fo~CY=|9d1`$G?^Z z7RUkJ;oxMy%ki(i!J{J3r$Q>0Zl=~+QkFJAW?&34PC@RwB3BRmk6ZtH$ls4Xax`_2 zw6g(EI*I*nssHuxzu)}N2d~D|{Qf+TF*Fg5 zf9;wWTFMP^8?cdNmQuz{yG7({O`dn?y~F z?!Ro>wks)DVKKE-Cw^J-O67&@fjGgFwf2*KrYKfi2DLW*AttGkvJX9JtOU`I>2Jv; z^wC5gi%&PZPk1g(3A>tY)OYVroC+2WJEiQ4EY)@Y;8yN-?Gnt~Nur}e#F2o5|Kl%M z9xl^^Q~|58_#hfTI0O``KmC&Sli-|2`Ewg^S0V-#hpDRsDXZomo5I128j$~alB{@C zDv73;O3t|dv>tebZ(i8fFCxqkAzd0P`T_bP{*&CH^@3;;-c>14+oK+H)8mp%)x*(X zMJ16(@jNz$n$2QaiWjmU72|L!*c>uFI&Wks)+&W3be<@8G{o`;Vg=Q3I2qRo?RhST z^hl<&x8zFFAZ`B&dkrIGsr1wzC^7ZFm*)K1H1+PAGUK|<0I4y~sl$6T>$f*g;cnn! zoEhS{r&FttF@F)dUg+>d%M-ue=Dq?Z;co_>C6|$VqT-QsA znCj_`Z`&T)W)i&F2v9Q#R74c~Sh!I2>osO8-H&`n+f(jwfm&zigA9c$Wu-vJVLGUS zuM)X_tiO~x%oga{3gME+KPtIj3j$)0s4X0O=sy`43Z0}x7DZ2C;t3{wrA$hd_ARE6 zj%KAlS}u9Bej2H3Ih^7l?xa7WmnM@!^|mr!Daikw+yp zTdu!9!y)<^b2U=jl@mEq_LC^S9D8w#`dWnw%>_txO7J*A_e!k=&`?mhMCC+ zF2U}+`VUr-j0UzSD&;Mtt;xXC9K1_Lzg}}^>OqH_<9@dwr=X7ln%_^dy57~z;DJSp zS)v5Nz-%(K{Uq$;KUb4oFDsv0R4Qv(L#eEg&Yd6_z^6qD=H>~;(8@K zyCaeM-tH#U8|(`(3L=KDKG$>&#zbJ~+59}N_(rF4laBY<5USP+Gdr|;2_f)kmSmjO z^}hA*y%vK+lKwrF>H0bS)ye-ur_6-!`HpGh_)PsZ3f=w6EQgsl22t=^77Q7$-QIUl}PA` zuUB{+5g3R%@9%3CYRk08uo2avsTf`Di4k~|osg9A`bWcv5t4)}xCq+TlNkjuNlc|P zKi$0AlMg@>(CKI4uHQ~BO`ySx6>PkcU{SMRq>!v;u|G7|1m&6x_$%xHuXN>KNy1}-u-zIkdt}}^bSi}SoPuc z3s$4)sW!e7X=i&V(q`GiL_bWWe%1IA3I_%WDm2$W+Q$HP$!0(o1xRHp8pH+zUgVee z{ubi@m>pF;u(AHt&Ozu-BNRU55RgLQ5L+U@cCn{>7�XCIFPjr zf~Az!9A{sPZ6p|g4R?KgoT&u%Q^ElZ#yA`~@rT(1cTv2gC;<;OnTC*)H=e4*^}-2a zfK=4+K0t|?)}sy&gyYh$=3a6im=RzXqW{?^Uw};DEaj&BKP%kRn?rf}%vO&t8B7y< z>7EVb;Z;)n|JWWjKL{%{a;P^ zu6w7_GkKoJBC2B5f%>Yg9i(fk-GkR_*Oms@{;V@yGSKCg0=PEc)4ly4-T&L4n^1?- zNKugTZ-E^zCi3I>=BRVQSH7+nk3ECT8=64C5Wdw=)^B*gfKD1UNl5~;vPA?H*xu>xzEc?0@W9;s#m*MGaa|G3JAvQi-( z2Wc=O*8`x!AIs?W|GShSG0YeMTlBhf)f+zWLRELE53a|pICmk`5`T-|(jy)?bhf9N zSyfl#uK*8|h-F#;0o3be_WQ<>ED&-iNkH14n?ZN~?>o)V`ad!hOmu40BnlMa~P)QO|>_0tm}06~CRoCpfqB0x#j zh((DYCHe2#+vEv#SP4%L&7cm7A1{T5ulIyajKCJW`*84YfDUC#LGHj4;3#-a-=N1c zIuM1F^Y3Yc(P4Y2tnuPHglz9=qjNQ)85DKC)<4hoV*DoR|EDd)VFELRJ!40E3v|wn z2K0yL!X9+3{z?3Ta)qv#L?}!tfP^NInN3dgzb_t=$`JsmoYQDmI`k4k!wRp@uU9BU zh1BVQ%7j~$w$pNuWf%3S>q4uCIRS^E6v#FFpBwo<1@8rkL?-y2>h*K_tCRnSPSsfd z2mX5Y|EYL`lI+)|oSp5_DWq0mT|M*2@79%f!j}i0t$0KL^*a59gZ;n(JXLhI>|bz< zo8Y&%)B zSH1FePP@%t@iT*j1%{6jW&diiP@_6$?NN4+#zbz(@x#xieev|L*)j)5&OcZPeV8R| zHrO{WmgA!jje+le93I(4C*tdKdL>H`d+-olCT;i`pVw8c={Xw+E*`08Y}+&5EO>$# zbYs{A|N4jrn@2TynCf<1vZ?5IB+n>YcX3#*3gh{tp7~2m^{^Nh2=p~$k?AhOZVO`q zpY1yjEdY$>M;JPjQtchHRf^RHjL$0U$L2###^$%vOzrv$)F)=60qC2c9l4&9fkS}Z zLh(p->*6c7Kp9HeppVD5pg_6Pca2GO64>Eihh z6!+8prAd-?gY*g>TZaXwSg>zz7u%Oo6vH?^DumgLSH#J0Xn#o4b=w|v zSVPUqNJn9AM1svm;W=KbZXmqh!TBn&9YQWVAFP=iBS#s^sfCGcw}$vSiP|8EiWAb* zt%pw+f)*H_iu??4oiEJSQRIA5R?~xe+hE~Qc!Z1IKLwaGYMe5OVkDyzu^z>Xk)kQM z3^G;G6ttwkC;E4<{z_E6FCdWL=XWex^wop<6VLvA^-XNl=EQs`%N_U2%d!SyO5iT{?EG z>jMZJ%mh^-=`0+Bcw*Y5(sj^#A;c3|wtB~t?vCdU1c%gU?%M3Qxw}6Xj|6uQB2OuN z*y)GmoIe60iA~|+dS6SWBJp%3;$>f!CQ+$(;loGsAK)d?$eNL$$#x^SyuVz`O1Wb* z?t1V*b@*?v-5wJONcFF5c>lz*z)aB3s!dBLyvGMi49_wl&i1+J0u& z$TzC**7m4x_6K^TH|h>Va`e&MwHC%Zen?{Pn3v?RIZ==!YZC&vU{(USB8ZY9&ATuTXsfEUei@ZP8|@!cPz?<^Tv z!ee6RBWL_WYAun2=wnbRy-|HmGJ3L<;vg{@kfGDvP@=xlmt8}43_WRyt98(9w36Z) zXgyk|tt2=nVhMZ$nD#oDc6xRZ=C!s1W5vC4)t0|qbt`}Qh11#T&PiC7qPx&#VHTOs zz;**O=}ky$9@1=W=x}t598TTc&7a~0p7Z&7-%(X$ zM3m#~C;%E0LA2Kkpj7}~94W{quFJ|hF40|S{2h1JeaLH|iEwZE9Gji+Sh~3xS5sGN zj;R8ql$3~my*|uP!jndI{>Gs0`bFhdkL5zyW8)m0Huv8T2+c@zcSu3$H%tRX;Woe8 z6dB(Lv8bW%%Aomk7WVKm5%~pSX3OgEKe7F8>@$@T+Yy_KZM283yzJg7UW)CTs-GQc z(V)C13c9~#_B#%g0)@kscN=*;k^=$@UpXiVxG1Fo6?R; z74PK?=iD*vh}o@EDQw&+^E&B0b@7h^YTN-Lqhc$d9?)}o$i{CdK1~?Vf z-Lr2kD5h3Pc_qHP77@cCyhIfdoe_)U-E|Z!UpVeN>9ax?PJ~ddMCwhC!@7Gh96BsS zdLF@vLpuo5LoX$Y?Gj%3{{aXQBb-yz=z($rt+_!@mzSw@{uL*TqesPbC;}+V@zDJ1 zrAQS<2TMg%jL^_4xd#VhCED6a=6^P?()+Kv6WhI10d{QR>lIugH3LB(N%7$mmo6yD z31XR95Dk^ESJint_lJk5OreL=9y4=6!ORH7&Al6>kLju-f0En^(H56Q#KoAy14Nuw z7kG|Mcfewu*M^>4n>(A7OEd;<+}*dE+<20$S{kxM2u2kt6uAk^Bo7ZPRwh+owPEtp zT>wa~N$oGMNRhnx-_@y$@t?gBvG?#obAFftU-R~6UELBcQ`fAWNY3?CGDt9gm$o-S zHsopE9eurETAzXZN_+B5g97A>fbdRKBlYXg3W}bGrT6@$T4!{<@u~K81ma`9i`d2CCC5rXA<6Fpc zpEcN)=Q%d`7}PFKn=U!7KN~%2d3E36DL(=VqF$;|W_!u7{-cap{pd`iOkc6ino`j$ zpY&AC&>dPoKWs#hW!fZj{wV!kEamk1(O891?dK$gaU6f(E& zR|D`I#m_GmHk-Bu62KtBmMf1~g)q%__L%U;`PlKvcf>c7>8@mU?uPet=u6!Z z<++*!trnr8cVsi$ADI@md&*}%^t{|%VO@0r@YbZFX^c15Bx$$YWxX+_W`oskw60p> zTkd*oBNPNfEWFal#+H{6PDjBiM(-4Xd|+pPuiIoQF^+3SRdvd5gd^vIJBJYE5>d@9}W5uSUE!qUxmqF27 zUH)j0(|7}TTqTX5IVdp91723Cfm!fMpl)0rBo$wnfvQjhto_`?y?tJ}rTxU&YO;M(|Kq#k<_Lg>nK7}gSRWmZPEWQ#Nk%F{0^d3HZlWsk+V1yP z`;E1C2@Z1T6qUu)fP|j+Ms!?%#g?yqsVe#dwasiUqu{W@vF$D^7Sj$14H4Q;0kwRQZ zw;oI9a{pa+$r#ek6R55igu$8*&(~V5| z&BSAz0UUV52!^8P7uLIZA0&r3P7RCOV*Tv<+}a>El_^|8#h-ybhmH~QLzie^dNKgP zXhS9lLqHg>w2S53#EMi-)c@)-GScp;8M&xbD1OeOSK|b8908A@;6HdeIX1`zl7S_w zGT}{&xDwM&(T{`fv8`Rgn`%*O0)$XGcIz;NoNKXuxY+||HxIW+I-?5ZNcU-=V@mI3 z_wsQ3qdSV0Hcdx+rR))Uu9KZhys2)`(ic$?=`AAMcLu;z>u^xofpabR-`SbD03eq`QPq8D%k!f2Wb5D1Rp}hF+>F_rh8!1)c|j^mF*XMSZ5h29$~4@@P26$6av2yfYTno zIYkKuYPs^mhfy^M}vc~_w~L!qf7Rj!@o!FNce7*OmHlBxe!A(`o?cmnF*HRv!= zgiMcxwbKQ%Fk8`N}-L;kbHVpRk@IF*j*j_%c(!fL23?f!kG}*Srvk~ zSEI_##`^4IKXc4je~<;rrc2D1em~CVh>Te(Q#3}c!iUo4dMI#~XePx2&#H-_KeY=& z%HXf7vJuLsCSkhENs00V9T{;QXqEcosw6N_`w_SWt2%#aQ!L z3JwN`rf_K#A#!MvtmBZ(tBVHa(sTpXY97qc`J~mftBmOCKhK=ie`G|~36+egL=o=z=re?h^gQCT zo3LOX`RL6kxUjd|=V)Kx4~HN}aP)jnXXk*F4N-;bCi6ZLeYzFCUSf&)Tv{w)ae$x2cJ2v7tuZjE&)w}16rTF%j9Y(c$+ z&7?1#_!8Oy*;LKLt~3q@Ugu3-iD%?tk!g3r*&uK`lR&J#5ZB_7kOyY*@~%5j7rC4F zQ+O{5*P|Uyw27j5OH5ZAVt)4vuMH@&ApXQkr9@VhbtT3 z#L?j!b?TvOzB8vyYnf*P&ciU{pm+~eWv_ZW$fl>OKPM=GXyHB%P{BN3)P?w?A`q+c z5mSJi;1C+{j$DQ`cfO_{zIRv$ju^wu<;7;ZEDUl;DzE~6T~^Y|5vuZEWK5OQgUT)} zdO}Y89iY&`4O<_t?5KCuy_4&E`p9i)OM6a*3{X=nPg8iEmXZ#|*?FH0278c^LV=PB z@UvKIR;rIdOoDj&xS_NGWH6Cd9LQm4pB%1GuMXvL@z$(w$bd+JM64#911c49EL-F4 z2J;)XnhC$ir}JJq)z!#rjT9fpgbd{C>o=(U@fjowW{+?%0+0DDISmOauaZF4+r zgWZ};aL|6_p?(hxVhxQr*)V_wXl*vY;ZCnhu0YGnBe;Fks>fy5g`s|VpgG0Lm^{H{ z#r8=6_!{}A8*3{&Zqv1jA9RxYA9vY#YZtLdSmL(cf>lnt>5Gw_cU#blSeeMBt1BXM zNm1=-r}yHvwD0R(5Wi%W%#q;*n}!a76vK3Na1vlE7y>B|cFjA|ZaQX^a4(oZ$9&?U zQtaQXTo@-piq~Fp-F7?oM=_$Z`ukl1xz67Qf`fpnxS+h4YN?qV)ci%^_S{7y@d*tM z4vc=fGg4+_^V!3WQfUd2U?ZS|?_*Sy9O_5LeqL8{`z2bNo+Ju?ccUszpHUs#@tZyOw* z4)4StAcvnu6*<&MR=D)K)lJf1MT`dY?E!94HwiEHCq_{ZOxE^nGKlX#f=bkd@d01J zDF1%2({K8wbfd7s6y*j-xUeIhbz*!eF`9@?=kDm_nEaVY6d;h5g zB0T%Hgp%`EC3Dxo&aohs;0dJX}A&aZJ=22gC7D6Gf4nHZ?08eo%xe^ z(e&$4WBk-tQvN*GMLIFbF@xjA*R)ztDaW6F>lb=xTa>bRSO6&nJ-=(MxYo<8G8B_f zO-(Wz5$=v=TY2Fh*5%7_TKV8l^X?VST3fmQdMjauy63s&qW)oMU05zS-dUtj7liX< zNmX!|rWFO2Ih635byB4Q4tG15CbDxthf4xkd|btZKV&B?T zmhYr&vOZnqs7`a#6g)Um_tG~C3Y0KI_u||)cHfR`02=1PtuBmM@(^fPfc`h_!95KS zz0{a+qEhA1P+LjBKFR*h-L&@<s^Z|53Qwr~em9*t(fa(j#xyeRe-YWKpX6;5)_V zLL6Qw6}pd@Oz}MBNBnU$kNCN+<#>Z*D+M7CpdP|z@NF97DeolnV1UQSfoO95yoUtV zf1uB&$rs42mRH{f8y{u~#a(}ELF4ZjaiX3s11xL56u33B)#!K2yTwjr^PiNbQM7zt z!?765iGD{eJfv~(DLo`&r9aDD=9~8uc>ox|abtq>_UvgD2hY7XcclK#t6@*k&yJS} ztM^YTN{mlks`qwCGe?>8{q?A?Ad(cyJIcd~I^H$G?3!*Lhw0|-2WzGTz<#K=W<>cr zN;2PqaD~hVYF)50^bzx5!0S6mTX+bq4=?#c2FQR4A~$NzTm-#*hJL*VDp8}y{x64} zHb(i?q5KD)u`w)^@R07fg~B#OL+B;`)sf0kNr*0uc#Rs z8-%MLU9p4Bg?`VN>$-F3X6!ovRL0YslUv0wQp#hK0z-R&Ui zMw68PY6UUtXXUIa1vFu1g!&rS7y@yQ$o}pcStzW_wM8m`8=Npy$~sy*Eb_0V(nh*u z04Bf_0PnY?H=x=jayaWZUDX3*pcKqMO=M{ZPjzLzA{h0eoS}u zVB9XYuCA_3mtTkDe(Hz;+a|=F6gM^yF<}Con~7_htkh#`JU@AxXzyZGo!Uu|9360V z!ZjP;&1&Q+vA8YR5kOY{;%17DrLhB{8fW=bf?vDtIeRw=E_)t`A7-dPDTMZab=bHb zcI36Ik^--00Z!ar#`&}UKIk#;u2+u{@1}ojL`cP@B4RrsAUOAqJ8iMT>)3%<&>>@Q zZB(7ldIZPiM}FlLi}&dP=6Fm84Qd4LA`F(UZdjiI<|uHDN=EQzU7jNH_jVb4>9v6jPw#8mKSCtVFiI)Ryda)n@!sn}@cX4kAI+ z1V!8Om|#XaO`DCE`s@uyM#sN$3)dKSkB4;Z+Flar->zUAyaQD^$dbijv(K>$hXHV& zM9`&#Ad3UcAW87X=DBNEQ!S;}!-HIX;f@EN#6TjN17Wc+fW_=`Vh=}fAcv=r&$55N zNfRO0M~B+gxxPK>jX9~jEe?LvWWb;;S$T&P-s?#U}^MwPXlEK`_-{gd~24JfeL7ufj6C~k%5Iyg<=A`tc6A_|_Ks$O6SwzTam)N- zf}P8-F`M;7mFh>OBo^_rtpGvaZqdW28t2vcvpvVkNu$vD%w77+{b2)!Rp6(qo1YqZ z?+==3t@T(v>#2DdJ>t7o?|teLC+1!ABSm~VN^(heNN8mkhD^cqq(G{#Tl|t$h^MkE zE^p}-L(;f=Uxu7nvU%t3a(XEHbA{#YH>|nSYon#6z>rET`xyl1!z6!bcE^E?nBHaC-8s#{A4Im2{e(iaeOxVh z3WRHb*|{04KmgHi;^+zqBKrCi6W#dRZA8_+1-5* zIyh4vqZyDon5)Tuub0%WPW5{zrNN9YCR5|l>|2K+ZF7PVBCCLt{gr+heYc-CwM$Ii zEIZRiB30;A+OnOW9&)HGwHiwfBV&@#9Zvf;qPhgya_H5RCA!VN+1csfkOwg?11K=& zn(M~IsK7EMkrcx^fe+XQd42ulwVKs}@LKIqT7XC0K&4u0?#5rodA~7d$}%q{l+U&`^G%@YnuQ75eTdxFX^&#tS%F&RrIsE-wVQd0(7{jv0R>nX=vsX zQXsFuvq<*gPpZuY@yeLrlkL9~e+UWVm*#E^&1)7=3P+_qtik^eiAixojPvB;u#A#D(x1FR7w(ROYyxR<~~hw z8Iz;%Sf;LCuNXI?Fm|;dAFH(ENb%ZvGl_p#>asqz(|)J$J(XSEjq-QM9?@D|RA-BR3{q-qb!EViMDVRcZ3=yVTs87Cmx3tiH?)%PnQoB}oe>n$Dud11bx;gNRqubJzVP59$y~hmNot| z6Wq>-dC=|04T#KEghEXBhYTT<-%6#!7X;t|bY=pQR1#XZuFja-zn6z#Dh-MO_~1am znKXxGs@6@X)}^Hs->GjsaG6NFlKnb%^`;&*V>Toh15?>TKgy8ootMqXBaX_4_3_pP zt@;z}8kmXnoWk{@omYq2z7ZL)`krkEj(%*Wvno=LTug`W>&X!H48*3~uDaB4e`Kmt?{NVq&I^vGQDzljU^VPy_O3 zPXeFo*Sh2@#BqJYs3ed7%s=gA{K_3`lCUOEQX$2mow>xUlf>0=Fz3<2d*cvX%z|g# z*6Y;ygim}DcyL5Pn6vCndpW5wvs==^9!V|v>V=Iax~R-0%F{0|P}5*`itz?7(Fhbs zmpu$&vrEaIN(I%6B%%X!n0y8vE13?!Nw$lfuF*O!bwy)~pQ-S?w^lEIHuz(|J(k0c zf16ka1SK*um*4D>{!I2oy~)9_f$VyXphJu34K?3;1rQl3b%CJQ&m{&%@Z!^YYIaOC}Jz4We zQ;}*EiKX+^YhR}kBT}-?&Q4zVvDcDc9uF59*m)pXar~^^Y#2vOavoW3%@B1fU)u4% ziT;C~++Vu`m-zKiQaH>gA+{?x4}PiVXJ)0%xEaFghtJxWIabd_9}BIQ^(!% zXM(TTozi_ioC~1sT9Sim!)By@mkP#WirnUqTu+m}v%=a9A^w@dY2gpk%Hb`g7vK=z z>?Nz#)fv7miKYvhcFP_WhuXvKR773qn^3YTe)UU9-uHxiIRHDKq2F{dv7Hz0VFhi% z6#Db--3Cl>vJC}a5A0fR=b4K%7s59XMQU^%+URWud6o|c)Rn7OYf|f9EOjO@22yxf zj2u|p0SqpuB-yw($;4o_YkFoHQV=ipdRtDt8SfD(M?G-v4j0EuwSmhg)l12%1>-L* zU`Fel`HpzQ+3GD6i#|Cov4+pj52q7spen{nrcQyb;Nk7sR8d>!R@(&Inw5`L&1P0p zB(g(>X*PX)LvRizXzQp2Fq8FB&&4=%X$rHUETyDp2bORnAKhTH89#q$v9t zB#9^p2gyr^ftxfMZzOTI>iy2E=d_y-6(7I@*NTcTu^ZthO9+yoeS`!3_RH~Gmyczo zQ?&v`hJL=VdDj0iIL>OQfN0kqqPz)LxGx*pyheEI6sdspc_yuL|4hMzMZ=(zKYYe9f73nEW3Gj=H)o@ z8JW`Bf9}Ilo^G|;v!Oi8SFuCuj7d%rUMDMA9|G~UhK@h*hUJOI@_s|ZFMLwoK5w&R z-T)$|BF7E_@hj^d35QRb^I>z;fhD(TaIUi2Fll3mH*2MXKYjZ;wlZ!YXW%BtT~J3n zDLBZCP>abyy<{XsIGH!XPCf*HadG$!_X{h$hkzXUj8}B1b}=2}Hcn5gk_S`3BpJ)7 ze=F%(Hn#r8#|MP?z9K`(!!0dQ_kBMNpcKBCCu7yH@+`+jpK~Witso5iY1e6QhhaU} zm*jYWCqNE7m>Q(YN3xRaw`)9S5Hc@R?Dcpy?j4@;q4|+j_X?X%#p?sz3c0}dwxcDP zdc4i@R@if2BD(H~x~yqAYUN4Tn{X_ z$6rMNHugE`#3+ z+=gU@*obLBIrC=K(s7Y06YwL5N=;Mp$G$qVim~q`>@P3Q>G}DG>n*yIMHenkH(iI* z+4A&iAD1mTh-qT=T;ggKZ?q`J^Vkp1vDNMM$*1jJ zjAe+vms}DW;_LIPP<+xd=`!9~@cm4IPK8XUWlu?X-NeJR<;Pu4r(`-jZi= za@$btI{4oYP}$!5R}BM4_qSDiwvl`Y3fphcudL!0|C}u1LzXLPFlC#xbq^nR?md_S zF#Ulitpx}6IziF2&uN=|3pry}icifz&ZFFUA{u1G5-mSZeL{Y*M>25W`*9dp_wO8|7R2*e+W36d!8dULwhulyFt z7~mqknfP8lI&PJ--EMLD{a=HV9a8A{)82=ZhVX|eJ}1m;WQX%i=;4y`x$YL&vsmOp zuPf>q)}!<9jY+A|z-9q-(~%CPNRoBo4E-QuuMl5Eenoi%^4`1yfhHHU5d*2Zy3VeH ztRWPW1Q0d0zfg}qNj~vlzFPKH34zr2cf_==Q|?*I6Gp`m2lzdLLlo}wxZEbXd2=H` zSj9j=zEv1vL#)v!zxvpn%L=J)UlGxHEiBYcy47n%r9dm1Ses$wv#n-(+%F8<@+g=K1b9)#k&S1 z@{z1a+O8K)#+SLxGKjwx3K?3k|^%)3&X^!!hp(enf&S;xNLpKpY!kaq(SBR-( zwG5p#@HzhW!@JhBgV3?JJ^D@uP^<}wT(&n|h2tP|+9mrmjwP}su>#OxT&!!T=s{6n zT(?G-s)+A}mn8r|%Z}L$m}t`u9c&JRk2GIm-n4NMy3BYMGAreQZxw2esWi?5a1nQW zs9xyd@mgv3!=`xmu7{fhi;c*-I_3#?9|0avl->%jO zA!e%#gaS?Wsu=`s!iz;06po?F*1cjC}+fRb78?jqh}{#@Wh! zHNW!5ui_R8EC25!RFA9n(19oH^lzkF6bHvT9}(-LIdBm0t?br|?E zSd^khUs3w;1j@XMV`!JCriu&$ULeiyhu+%{y>D{@oE&{PMco_3pwsg|M0hWHH zZBm*Hj_-o&EjI8o(^-R95yY3U3E-^^ zTV534^Q(C6cJg-^hl&!$KY}fQ^J8ztN}mYJSKb0u!w59wP~ZXpm+l&xL5s4C9uN4L-~R8B)Rt(V6L&k;-D)3`&U>LryQKtcX1l z#Wq(R8x`kTLir`V$DPF@8K0(5*1Ktf|GeS8E~^BI>givisz7B1Wh?x9KvZ>ZN9^}G z1@U3;Ze5vFMx(Yfdqp$0_(c7d=-~t{06K-W6jt{Fi6;QNXuQtH@eX_?dfreb24LHc z9Xq(3Pzec0E@eOO2kA`!j~eUQMj!hk7$<$T`4uL5+j+`2!27q!F(%F1a^#t>r9F82 z#lLfVk!eW3=n0`>tiDG!2D#7xtIVl6U|7uDOi4$Ee#RnWk=7YC-jve@(&BpU*A2p*-sAovwPaRuIrB4#(0sQXgm);njZ;Omn zdkq4|bZ5VNaa5E@8zT5t%1mmPS!nLc1<~jNu#FV^y5?GQMaGm~Tc1C*tVlEB#20qI`OuvPu*Kpw zW8!mv&%FLx!9{x)8Ngpn&MAwZJJnVb{A6oBW3CAaQzQdZBrLy7VQnp?dj(5$$033%l2z_@)v zwkK6B(d>Fc)2R3|qDuT&6e0rCE1Vk?g%sP#OG(F{nnRMfOdtNJ8&&m!PB3_+@D{_6 z(Lyn!`dEafd45;=G4P6fi={YHFs!t4Kvhol=9*~t+s3|kPpJ54!fyUH)jj?LV1CS9 z=pu(-Af99SuH!d3A+K^Q{!em&rTy+RJmenBAGkmQT55f1udgBp;`I8n3FW1iu@>Ft z=PV$WwQYt;Vub@=-XhB3v;-JYldeR;w0ik65!3c4&f0z6S6!(5j*AN1FVYSQT=X;A zz)2V~kDA&%$z#PNp~1Z^tx~h#mE#Y)0;{nH4!wrA-^TMOlh>#=UYzYJt8=g!gOG)0 zk|`Om2_pW@3K`oT4|d%um4=HGEA0yFIARvHC3GkK_tF&YHOL^Z5s_Gm0L-5eya#^l z`<`748c+PyWsa<5j~pF#Z>-)WL2tg^73nF@O;PT~B2T~4R7uiR9(qhtK8#7S3SCeI za(#m5o!q;`N^-@G`5nS}Nrr5E53 zHuGfnkDd+e0`PX#TVK!@fO%kzCvQ-E^RV9^{+Z=_vcg!&3Q;KnFr93z;tqCLUx z<7NfzqNlLha}bOoYd2~-^sx1A3p$vLB(?|z7JGnTHXy3t$qk{Ro%u-Rn(?*!c@*Dj z3_5Z}V}QjOVGS=mVq9h$dn^2;Z)uaG;po1;*Y*SLdXI7o;RrOMqV7=ry4*CGV(G@K z(^E0%BaK>p${;>dg{^=DvBlt{xre;ExH#%ne$+S35-uw5vA3+6ci$ouZ|T8PEjxrI zXPF6S#xSzoCu8ypL3avTUH#KIcYC};#ZD|5^lOU-YF+L4Dc6%e$a%U7IM!5;+qfsW z?|mt8^q)SyfrJ5?SKbUM|6gpq1yt61&^4-v3L@Rz(jXu$El4OO2nd2SN`o{=hX|5_ zw1}Y6-6bVRs&t2hbR&rH&Hv%N@4fe1cdg@E>hT<(U(M{SF|_ghL(|In1nZeZI=C1*$}CjXz>2) z1uuWP(dX>sVg92aaek36)`vPg9z+~@gc!ISrQWAj{qLkt7ZME9rh!C187sG{T`03z zyndM42t40gn@sTx)!W;xXiMwByBw)pkeJ3C3)!cV+-kbCn|hQrIiD&LX0n(smh#zZ z!o()=83>J=*X2@u;I<{#rDWvoroQuP;P;O3%sV~vi+lUOrM^m@xX3lAbTLwV|H-v> zjW-EUUj}~97-PgUZJM^>mAZh6bW`R3yzJ*oTs}cQ}JyaS8*NnD*3M zkgYm_$4%(&!?kFHT0lMB``AHq76 zamjhaB>8!$6icX8@RN#hf}^#8&#~lFx{-rMThEtez<%~77W9-vv%h^CwVT{L^?MqA z8Bu_M%^W;V!gl??)3Ep6c5?(#JQN3Oz3zlxcJO!9Hzh^2a|5|5#Z6I_FOAA50#*PRwEO1 zZ4O<}?BvfjpTpc8r@GV3mPb~)#@M@Sd!mBwpXzUXtPBzs=e+k1?gTl%xdPM`X8(@& z%^OeW_uP0}jI5?qHW$%aRK=X`cL@yZ&(jpPIIeDmKQf@s@z|KCHfOb`HtxScE$X;E z*EZKJW+}9tcmrVDQ~yOi2rZfygVR*lKe8mk6H`nv_%e4wuRpN5%I}Bcuy!*0&i&NU zzhp77ja(qB=;nAJE0h8E?C+#egl$E7(ZWlb-ar1`xcJXt$xuw=Vkj5 zBNkDPs4!BDIvC^)*IdVa_Oan>q^vMQkwfB`FD#MH^uaqAdmRCTAEYu{H@#VedQW<0xCjw z+Dfr$XAGM2;L<&BK^KqJB*BPe-3}G+_93Ue(eEncCa$aenG%0<3JHDV0c<6*JuT}U zyK&sDr}Kl&1R}x>(bW6Pcw-q$zYpKyxKCEt=s9=;pYh22w$eDXcPFXQ3{F!{Qm zLKSz&_IOyn(^;ae@U#5*HLwnHRSA^(iSrNySdM@6_ht^4Tfuzv8$=&TOWdE5bjd_^ z_VhRxuJ0)c^2|(N(_@JqPC%h!QL$Y2rMD<_k@;x{{f#I2JZI0R1n(a+2;4gBH`!-S zuNgHN9RJO0iq@a;^>;}_VEsnT>O{rROm**r`$~ln(Dyu?1c1(c4Seq}VRJq11ctVKFb=BciIm=tNvRL9p!)EV~2`9@P7NwA#Yx$a`mKz;^y#DcgIn%Y~-to zJm*14nE@WI1PR6)D@0t9liBVn=@OGVmc9n#t-l4VPjB+>QF%+a2A2sA4_fTLXA)QK z%3J@iv_hM?!e7Ck=j`|7h5Am5tQCZWF`c??(@r;jvY50g});DX*LLv{`UJuQ` z_dMK)Nl37G{WBqQOt}iO(f5*D<0;kY_w8&Se>eE9oWRYs-PQh<)a0%vym58ikemncD;{jp9#PXQC>%^(V_(ap0k4X%My+^AN50sP*pvJ$CBR%O3i%2dy~u z)E>Q1NiE?Be5yBpj?vMP#(D5Zx70k5hSBA*r`X*C%b_$jiq;_m_N0yaLjx!TcrbELJMw70F&XL7&u=3z_sGUG26c`dDEei&i~P?+pHb7OhZu*y0XdhHKRtC)66|37k!g>I`w(&K`5)`#hNDGF zL~CDr)~&kkp4W2m*^h&P>J^3Ggit_9SQP*-p2+b8MK?H&zfGd|g^?quHJ%pspoAGd zCD3K&7Hs4NINpF)0wP-4SF8{+DQkJU|LO{&84`YF>LJ}r>BnYE4cI+7vS6Gw?TG5X zZ`37+O|+`tk>RxP40j$bThIczmE5H)ikpLFP$ItSHKaRt`L zoJnivjhgzysqx3dLD7tNYg(SJ>UUBszZOg>qshH!NBwC0#(D;o$j8UaTLVdvSA3i? zHkxA&?hP5$2t0P`5#Y1zf7h&HwLt8D zdtLqiQ&y^M6gojYaX2U1=gg^Ig0Fb3kGhY7`OUr4BZ`iJ**IY4j9hgrJ>P)Vug=|g z;s~Ob?ts+x8*Kx&r1WgybMIGJk1~i#Ix;ABZGje_EW1y|Rf@Apl@vqFI%Jg0qMF7D zb|d>n>pp#KwZ2rb;d<*bsqN+kpFjy|&vnJeFmkfCrw!fH(QM+i$?Z zyw59jH-2Qj0xi0Ua}{q$D@T*f{$3fj=yY*v zkd@`TVI1mw+2B2*k|C!-eW(CeCJS;{B=Q9$j*)|abYzzv4C)Vc3Y{EMIvh~a5G zfgzkOLZ`KO6wim!Mo10ZjDFEO40CAZm@Xzu%=liKqxstV>zaL^asf*XdYn;3; zYok2Y=;QNqcd%f*52R2apgCGXaD_fvH38#tR;EjQ><*mMTbf3XcL%lxjW4|YoWdws zp(0;J95F7tR-j8`=D|JJc>a2}KdytS&ErJZB9+;sJseY9U~vbCI*Ubr2`Gm$X`EWH zg+hUSRqQHtUVZqTPOx(ts`!&hi8wHXzsG`6tT?5a&b3xq@;##8ah2vDq7L@yoh1## zRtJqHU{FgF5JSykIbl3cGmg`b^mzdf0Hj9?KgJZI=+AA)~Zu_a$mJq}S)f zSr!Bcqn2RBT9@_iBcODN_kT|H8+vUAQBqAqw&}Uuo+|1bqs&u(YeT;WfORYwEe5&z z5UUiVsr1@je@R4qDt*jKjE2#QjD&q7LU(TBBRZS8wN{Q&C9hZ`*)t6b)l<3VzWV@~ z3J9ZRP%`IKftYXqt$?`xbaqn^+S`{!@oh*2fEP+tQ8ui=J$mQeuC1yJ`Q-E3gm;1#i87}3G zu@<3+N=#~qPphkor_s8^4S4H`3>bxLMLkvvwhhdy9Rh^d`nlx@9h&e!ReZ2u=l281 zm=ZQ*Vc^1a0LJpaxsFI4JXvn5Wk+jcvw6ga#2No{bRbB!E(c>7-BrmfOcsSx`L#W}faB+~JstDZ_S^=Y&`!){ z%~Bi7s~0S_DiTMg2 zyU>Po!qOz(=VK%D62u*QXkZAUbU+;SQSASFvY(%6?I1pIG^TSrEERz=u8;h8vc*_E zFEP;d&#fC%!2>hd*sRa`wL|2F3a?;O1YY(LvYlWZ`@0XGf@sOpKP^Nhnx8fJ9DgZ@ zY}W0*ClM`iUB53h$pluq=aGX=#l2^>Q2bSoFLoy~hS0vKLWHOdft{V5Uyw1Pp@nWx zT!j=0akItaNsv8iy}_XfQmec&0=Vr*AJHTy30U1gGPh2m^YqqiOOO2oY|TgU0WWzh zBCBfWPs^ZYgiupfzyC4^WKZ%`^H}YpkcHR89cK{lpzjnX288{$)&rX3No-iQ4g`y#jcc;%y3t>7(gfN3w1{PVE!>Q5Q_lJG zqblxdb?((^kJbDim3GrbyE58ggselp`%gcn-WZD=%NBKBdHcizvYRQK)ebm8IzXQ2 zw)M{b(XIa--Q z(3&-b$}vCn59D+A897WV=3I3%>Fm@{WV7SZ^oya@M_0q!W9KeHa7nSZ&R( zEhNzrSuLSfvdUV=>iJ*($j9gKg6oU zK#u7&3ZY)a38Lmg%r?%ai`s_XgQg!ZPL%eFDwheB9AFIbJRX{HTE02tu?3&PI(Sh6 z081u`ocI~oABlx|R=%9s-rgU0fUrT^I7Wuo(5sfRF1+CcD7ygG;Rh<+mk~P&2$}!G zOIXXepyC3HUuc3?V`mM@y4 zSXSn?ZH(x?Ui*XE<}c>^O>Rl$%W}~=b@ToJhMB+7Zu!)*Ym8DmpfBkFQUIE}yx3RU zpT7CMm3i_R7oDkJ=yhdqgFG$dx0{kebp&zkPRQ$_qvZX_Etvxf*SFvkBlL@5V{M+r{IUS7aJc@MCqO#bP$LekfN zRQ`+^x-_sj%5Wq<)j!sX@&8y|{FU`#>7XY&qG=f~HIV-%Sf^ ze;1<*xCU%1TMeCHKB1@hFi7|Dzz!wcMp*(Lm=!J~ILlfi*JhKhGj3q4Xl=5Uz3GHRlfsx#GVs@)=Tu zdD=q%D@!`v5a*#KPHd8p8~KgBXVw<$KbpMqMABsYel`_SKhz8OwK`l#x<2|?=Z8@v zDW}epwC%my*!NbWM#s`#j*+@xACRg-#M_<_h8nnzm`uR&6Pwh3T1Z0-DZ1xzJXm%pbq7ke1jUM-%n0>si_OLGmJ{ul94#xRnoE17R0mop&UW58p7 zN)#bSK4T$@fY&>#P`B*dyZoUcv`jI?t?4>CvWOG1`0 zEeZS%xAVfWT}XgZ<_Z-2P!r>dmQPQGqw_HX*s4!P$T8+U2F2dEwm z7?l!srkniWfuilaL6lGR+^e)$&Ltpo!O#D^Hs>C3u|{qq-+$Y8mi!<@bYQAgbl_b! zf5;Rq>Pv-+K+;bceoyX4q-jdSj3er99c!I;%|95X#%h$@#q6|=H~;p`#m z118%ghr+lMDFVcvbU*nd`d%_0jAOYiHCDDtRQd3KM?7H^{I{8vMsSIdU~en^_i&S^ za>DUdZQ644!dc&xyRV3rm(5AP=Vs9!&W|{Qy~0r6(Dy{dYIxF8(oeED93; z8a3zsuWheL-?-r%jxUyZ>_0=Sku`>3-f>^f2x0O(SX*S5SlFDX&NSwEuiEO(t(6m} z3H9+4-d0l{wBzpiOIJPDDJ`t*(2l*o_E8*6-R+&?_e&LXRRZ93Ih{NTy1Z@_@geo* zaf|`@l(bHaTVF~%Uyiz$Dyo&On$8{S86GF*YOCA$qF&YTf+qG+;*tKW}bo2nn5DE}sV^AysH$@=&N9~=Ijg~0>6N*CPdobIoVh8GW^5F*aCtt9>kYM6+6&f~f&^?zKumPq( zb@CL_DGOuGgJAfnk^a zK!?t^Kil&r)&Z1wL-Zu86>02ezziv&OE@ite|)xL02t^O@b~zw;pQcR=bKP|__7oW zE)sHiX3GopBVZFwZkl(K?K78x?|#@j>r838`U8NCm8*slCf9J$Ztpgm&{kg~p!4e| z=(k`yn_*gvMr0;ji9+dqb9Fq(BL=XZ@fFh+n&wOs#L6I2@DETDK+njfjV$S zIo*){79is#c`=<|&3xJ);hfpEU&5>XA;(?FqLLEz;G+upg4Ix-Lvf%VF*j!{qEQb7 z;OQ3|%lTDZa~(urH8?4U+H%N;zCt<2HrLxRLS@KcZ#*sY ztiYb(xG^D;=?4(IvR|?o$gFEDqPBLdIk86CYVe?%OFu#exS0yIc(^dcS!T}qB!b)U z?iE}bYa$+x zUz735=+aoNCPOvEmhi(qVm+l>ruc})7@u*9))-?oFr-vfd_NB9^dkNIwmYoxv6VQ)AocbJz;Tm*7_9}Rdqau(z4cP^^ z0s0N*1E|3?7N}MBKHgL34RL^`r{~|4gdrMl7k4VU^*vvi;s~M02P_u}io`Ge zJK`nk%V8xNKOGLiqPqw$s*14RC-l#j;09;F=%v8>RW9<5qz^`hi!J)nxWM1boAjl5 z#Jbpe!af#B6PgA3y0+5|`5O(u@c5>wd?rD0AcZT=)cBM+_I#Dnj^O+;&cY>he)b~_oJPKu8+oo^HXrU z%^;EX4pJ2uX+18pRL{K58`WbSLD{Y2?<=t@vpqXba6uT80OR7te+}~%5qM2YQOGH( zz?$nAmwe&p7wM!jgfHNYUPi*b$2p^wb%ymOEzH-r{qvhXj z!Q)y@-W})uxYm9=XiRHe?47Z1J8NzC{9WPMNx&42h z^Nl2~Fy5cgB%}!0_+Q#y9NPoLbt~_w4+X!q zQHws00&_{#fRlC+poFVJL!pV@9iRrQXK)Hm5_4EUX z&urVI&0f~v%#JDXJhauye_ZPVN`C7|($jqpm#N+&(HL{9`_kY*{58|NICXdwzN*`(CL{^IQ1wvYmFvoM* zUF@;aukC93w+r;(AkwJDR`~-mjlH&38y4K*3WrT#+oAv=Qw*o^LD!XQsKE8;lib1v zN6WcLwcF7A6e3bZ7?$p_!iLZ&RP8(a{v>S;1h1x#kGo5cLpJ|pHGfy2t2$qee^+8X zf>8o4^xd{vyJP_Y6+Do+m~DjZ*JHRShdNB0)1%F~#{O224c$OimyGXrmMZkCV>aMN znVcBRkI?PvO_M1$s|+S&M8zlknSk)B?N^0Ec+T`2E9l=#-FFv{e=KS1)HBfV5a@HX zD3Otr@n%7onfUULzo4feH+5XvI%qlPC&C?qrg8jZeievtCUA7frvgmArZ1EHpeS$- z8vQ7Tn%FcxM1ij|IV3_LUBt=!WqA1WqR`jn_#CYqa9ZlDueH~xIQFmTSU;Y5?9FP(9LCg%oq?H(5Ym_812A%zK+hcaI3*Lr9f ztWIdtxO=aP7_F>Dcxz}(-YpS-8?vLMD^{vCL|in+*`U`sISmS(&lqGK)SM zHud*`>dwD9qQ5bhspF+$#;b+@X?+x@l632HSn-;olu+MGcTc>2Ku*S zIJm8`Xd~y92xr&l<(2@5APHUmMZfa>yr*3UF+%Akq1kHPZnJjNU9N`rV?s}Yex)9^ z2N_?ZibcKOpr$sgr4JTMB+v4uF5Ol1lWChb)AzZOp|*xAP(hCzHYMInz;#M%jXk2- z8|3~4DxERRv6*)xl|XNK9z9$Lp1UY$u_52b7bD{hG*LqG&z;`@89ISsqh6dVpyrJG z`9YBpnBKw~2+S7bVlH{LIoxIl>;{64Y{6-<+X1iBfl)^W{zLGBcac$sL)^2`=Qjax z0AR-&ki@k$S*gZNNP-r9!Vln6ox#i7O{dmF6%;p$w)=?rw&!t~wDz2W)Cn5~+q{^O zi&(^H|74n*{9vlyBAPbhp9Y_4xAmxi25ZIIki9V^9VR>>G_NRpp%>eu(0I;XVmm2n z(lOf-A{Ls|j*XiLiGst*7~`D+IiG11$Qaj*acY$j-m=L<_ajIi3WWxT4NvpBj6Hf{ zSt6;#T<%SRSG1_)kqQ(A_XZ<&ptG(k6rVBNN{1zxD;!zkHe^%QJ_%1r5{x@mPI+b~ z8r>uDkRoAaE?6QClmw_tD>!jmB@vF&0${Y4eESvTqt3|8^c#Abbu<;^cy+wxVKT|) z-h&df3J5pn>k6)NaSgkxeo{QhXIO&9^w zP0Xc2pCNRiC5(!?Gu9Z?JXd63VE6@LDhIkV7ED3&<=&1BU?$p}ZZr%d<}lZhVeCcV zL|#JE@J+gReYdZ*85Je!``chFA5sSyk6V+oD8VsYg@w*th`g@|kak8Ajg%)P(xq)o z|FN@=#akk_he_5*a-`$L>`oR_evu-eweyRM?%x$3f=#Q5TC3c($@=>xrajz)&n6GI zmTeh`!Y0*?$dT6^co80JQQwWJ8fRss)q1qTCYks5-tV7X;KZWM9b5&s^c%A+=uZu= zfVTE%z&FbMA+-Yy-kyojPgV1VNs&ModW9*bA3t_A!-J4iU_Froep-SGehZgPnaj30 zV$Lh3tr+qx0B-NE4CY$wW$=F?2>b5S70+b`soR597cA}r&`Dlp-aduCmv}d0Wa@9n z==dur57b#Jh8r9z;0RCgXA+lag(^_l!vd|owk&RUsaI*;BjUr;-Qg6w-6Z6>Hv2bi z9eQ@0){A{OZc**%ve|Uk2<;2%`DYY7^S`CG>;u+=&=4VHE1Me7|IoDe5(M$5eEI7aM{b^L&%&aYsp*Gs3W0B|O0hTY=W&0e6nMr2rKZ3bU8&u)1ha6u zq|a0a)%GiVCvx5xdL^J;9)L^HWS}#FC%xoGaM2p~y{B42P`}DgW$^NF%0lr-6vkD`oT7k62N04djIha^C5m|t z%M*gGGF|QhpEomb3fjhbs>rk7z)TVGmYH(Tji?zdYqQd*wPidc)`jN;dyu^QIK&s*5OYmeWE{o z`t&w?Rs%6BNOPLsSOruJ>1uTY--6`DdAgc7Jb;*30?@+qDZ(3MumjeP2YWR zIz8n|vL9N7bIBGLC@wdHbT>6w_0<^&r&Gt*X9~5K1fgAEoR^dlMKz^|->0TN)NpFe( z`Bu_!Ft7-a=tt&V+%{7G%35^kTJVVP0Hdr7EQzJjFPxa5Z>X3elid6{5Q@cyOtrPM zhx3~^WTd>nUFzR>KkWzLVphPMnm(~Ja)E62=rer@brKBv4PYR0g)e#QeP$oiFj$i_m9N zr#+RKeN_~vE^RJw?U$vbwuR7oINWszS%tB2XcO zSscYdlrmEcD z?dET<`olm|mvS@OH&Mzf)qSZq?cv$t*}DNlNBIt;=dlxk zzghXpfh~`PN~+q!w&?m0g|N7We}+?57qGu+w<bP zmusreFN-XtfuJZyE!>$?_fM&pxVQf!jUXj~58g|a-j8WkQ5#~0(Nty8Iz!_KNQ})O zC-1q3m0B77{hg>DxD*}zPESy84#)2aBsh@I(OvkWV1*O9(A;wmnH zwC%Ru(;8nS0&WV7_QXo>u5=}6O0-R!XmB1kdSQ3qrmFPDy(1%>6xsP1j*A>vZ8}{8 zFBwgH!aNObKO44Ux}V&*xJsa#j^trZ=_iY24kgy#3!icSlT4wj-u;F9yN;D8gh=u# zkT!G|t?)3~1Fl=;g_pu==q_rbaO~Q*U}h#fP9VGh19&zU5d@cSAto$&q`4iLRJOAe z`A-^c#d%{wmyT#&fGXxq;GF$TrvQ4S)VtogM%;~B7M#0Zmu|ZrqzTmT=G7h?ODV;1 zZ|ZkjA)B_9=sOr>%xU>#bNE+lG2=5EyLxfWFcP1pCFhchsK^)7R3iBzI`P5RT(&wI zYQYbAmz#t03>u^!@))w>A7Lq$lB|o*Del=l-_Z#`;Rj!IYehOWTt4e+Hx&RNbYJjM zNgKU#U*8@B6NTV5g^WPx{qgw$OQv*&Oi)~{iQybJ+&w*AY8Iuqj^(etsQHl3!|$h)JvWNd)CD~9N6bVTX@CAH#)?}Qf0O6?qi zZa3koJ+VO>7c7k3X1toCIJTci6l835_Syjzb!LS8s&02|d8!FAA*cWkK4sct$+!>< z$`EEh;Ej+vaLTCFwG4a=Y{1U(QrvrcQ(E--@)P~5q5-_PG`CsxDy-FuU+@0vWCX=L zUL#9Mab@6hJjY7v41OIfw(})Grkuq@VZ}e=IL>G@cGB= zP{@c$w-i%^i))_|Sd!RYO@2cz?2QZOFJk0gwEFo{2O4wQdBd<{#rY62j`bmnP$HgAo*pu(w@?EXG5Niu z^`WCY)6ZCp=TMa(-7NAA`Tb!; zA(?uU2FvdZ?vRlpzCod&!C)Ik#1>v6W#+Oum9PJx5XK#FKd$-pgX>N|jP&V*ZO;Oq zBlls?WjrE~d_D(04lTmtW@KQ9o55aP4#FZ$%FVrvzQFb%TLE}|{SE1|K;bnTv~)0q zyeb_O7!rg57l{zQ^&7lKZCu~Ff~(?#>*f?qkV77R^ARL#Ey+yzh!`MK08d(ya9=vn zzJOn^nl8Zy4c#xp!W=&bJI}iQMg}&eh&a_u2$5c=xjlk6P2ZY{-MRtp9yY&8ebN+( zlPy^%{M{R9Q)%u+?0&-Gw4rTN*=hgoDrGP**X8$mpu!39#afIuo$j5s#?EjyTFOuR^~`u)5MN`usKO@s6s=zE_g;OkaaJy^jk>gM?EtSiM{}t;=Ya%_*1^< zYtl{QSIG}_uq_mk$eV@-y6s@3JGbbTvn{%^X6YL?&QQ=s*uZ`y6sWZ%SQSi$ z{T*SIaVpRL%;SCr=1Er9niOy{A;zkxkvHDU`um}0*Ski$6Zvax=IZ1_bD_F${=BNs zQ^2WzC6wE@?Ffsf?wx*2ZLJ-57vvVi(}!ayb#wY|gJI)~9nc+e=YYn_nf?OJIP6#s zOErZJ$2l{HA)VqERV}768V^oba0;VJEmW#En`EE%oDIf?CziN<`l5#!Bo&`0^5Ieb zUqf~!*40JPfV)j9GQ6xQlBCS(;V-mtXzP};&obD{e~p?KDhyetHIfnE_Fsdl+83P) zlha_Uv^#10^NHu7f#!4w`*55~s{YyL%*eMfZKAOJddU1V+8MlmxKr&+{$ zo`RRJI6N{^M5QIq%6hQ57a@a6KGhkwUHc4EOB|>}?}1U1DzJe`F-8F{`v(Xo->`Rq z#zK#;DiUt+JS^a4TC8~fTVsAK#ew8*t?MQ?^bi-EJQH9G}1rkzCjz`e`PM&UQcnM4#V5yLLk>B-Vwd)TCC^?3ibe#m_!^65O@j8Ox#!yRAjoDi!Rx2@Y;i*DtGQ z2u*LX5o+00p<##9zfnz28_N|NpDN%_0|zSoLPIOrAD`driA0*Wf>w{7oL%4AU} z-7HWm4pwi!j_e}(tEM+y330;L{r;rOUJW-2OJHZ|yX7t-YOSD{*?peJ#d}kcExwJ{ zkIVOGYv>Zw6dV)=B?{kA3F>_d8u2}sJ=ctfF;fnDTwR-egL3l}U3Z}*}<^-w}!Uszl2>I@8YN*_vS&)uc)#t*yHHU|t(0-|j> z9fM+rbThP z{MnvL?}>p6gj5=-&i3rmR)*)sr{a>6pfIJ^RPIbp@oO<6R)U@J;t8e^r(ZY-k!}2= zG&45HK|o9`!Nk7u;Tog=$tXX|!jtE2(N5|X;TuW`xvW|_;_Pp7YhP>#vg0;c0=9PbY`pvD2Q};LFSn8ZRUrPwe{=!3b}g0F_-;YS9!uny=Z-ZM)vTHFE~p(Ikl@7|&=;I;6=p8;Fh!v)^zAs3Rc*(pjxj8Ha%(iXZpd%78~ z#qaxNmuL*o-@M6J#_z2hBdJ5eH8}4*DgW@l7Gz8?BAPjw$2(e ze|z+z!NcFl=PmsowukS-?;QM7Ag%SzQd=Xz!a|W4r|2oPrXxOb`Y34IdJhraKaub= zPSN{GoiqQ%v6H~|#c_F_WuWEx%(9EB{YJV!Kpy+FF=IgH;9u`xwZ2wB@qX@|bO2TM z0_XcVi4}Jxs5YI7f`f8b@6x!p+QdG&)<%YBY^$jmM^Ot`2eAbwxCsa#l7~A-KUuzN{PeRu zSC-iTE<}_sExJawl5^h!>3HiH-Uma=>6}V*y=n6XRV0!%*L4Yj=8^65L+y|#`fYJ2 zsW{I|DvaxJv9~iGJ#i}`CD;l^k&mxaNsQ-{xa?<3j+*@b@)j6xesRKfvev6IodO%u z(v|F5L}^JDvdf79#;^l4w5K(z-X{i5nczQG0JB8Id%#%VAQW2aC-Z+H3Bz|H-F_EM z_bHNAa_V;7x4)2Om{^mqZUtYSy9Bn4F-wwI%BPvlK8cVEzrm!ZoPw>t<*u6sOYAtj zVY65CH_MgfNLrPe#`$sQ+Qh%RRZkpm))QMTeJA!fpe3hXDhL$P(q-w!0K!)bs;$v% zN0DunncmcKN1XTDqd&JL1g}B`P$cQ6`T2)JcK0ZgP0ikcVYd&*J2ePSok6G_JVUSZ zj+#BgutuS%A$k;49U3LPVNUh;+pqHvC&Io88Xvj_D@Ge1Hr~S1^Pa-RMI#DpIk(v$WhOxp!NqHB567^3 zgODICbEj$eDmi`9jS!R=;)_y`=6q*NS(s*sQA`n)SOI7p$~(&=568-HK9Z8!YZrfR zT@Pahlr`jP6@E4^gQmz1BxqQEqC+FNN|DK!&OnQ(FqcsXW%8e5wB3!Qc@rA?*YUfwSjKO7#(&yVB}UP|aGP2mR2TDV%vBe<98 z2_Y=orbImjr68lnO6k*>qB|L^B_oUWmqsQ30Oil-fQyt{ z|E8)InE4q2)5TmT=Tmrp=jDKS)r;V_uT8Zw{y;*BnX2FX%7WOCk>;aky;COvJoHbd zC)@n8<%)&S>vIV|@o#Ph{5>U%Na(CC-Dv#PUp{yB~+60O@ z;v3!;v^SXQe`StkyoJ{5&O_Fw7Fn+`;3$?-tB5$`c%b!8ac=NvsJmJxhVRtqZA=J`HXJTI+N~c3@ZnEvSJ`7<6*P(A6UG z$5T3E#L%%|9mRK5c!XH3NDgzqNzHa^Po476reW3Ybmrs4 zSZ={iidInx+WvWq%#`g;5qVIq1VjLpu)WkZB33*r9%M=^kXsb{c)OKA100OmqWHwdAbWq>WkAV0@T8%UjpGpuIvrm!!PJv<^O+9XnwU@%=Ms*AY%9~*4GeR z>ZsR~$I8{$w5VBHa*N=-3FlS(bbOVeW?juS8;C4}&HzJ=^)+cXRUop$y!&(wa6rju z7I%S_R0E(1%$>}rL&48$C|1Atu=e7sN%c&5a%_kX?5{|9;;+)UHL28Z~8kx6Un&qW00ZZ$ATe`pUR=E{3& zqVyJv^BoNLUwU5&`@rY2Hu5$ncLyflb%AK3HI>5~##@aH^@lF)%thqO7241}35M@v z2p6DlR^qlLxyK^T0 zQOfn(DYASBovL(1xO_5=&w7#F$Uq4hbAiW{P0k_y=jWJqI$CP+hBYrpx$Iu-jfk+v z*00r93+|Rd?Ob>-MOgib^a;!_CL4q??F;~S-76n0w4H!$!78pgO1dGlHarv7h>GMJaa9+(}E1_x!pskIHseDUq zpn@viz;{JZ9_DirsV3c01bd%FxAWG_muHO;Xp;OyVgJyVvUNZ8)lusWEB`Jbp6m{n zb76rHyCG(|e)NE~;^5T&oVmXYmpi`*E9DaM2+{qdKGB+@8!4{EAlqRK_FhnKc^T%g z2{44uMFz?-Fm)&b`SJ@C9UN>uXgL6FyDCge4+=s*tfv^?igy0NDE+vb*AE93U{=uk z*=LVj`Cgp3{f4OZ!nTPhFJx~FR!234)*R*?K8caOWzm+lZO@{Xc zE!|r3yt3?aFvkWYUa>q`!zX=oP_(Xq=Z$bJ)9Jn1+P&5@t2Fo1QWNDqxD}p@Ewv7e z@EUASMo?-i0UBgeOrwNqd4S&NioUpf1ivbrzB5HVK`}25{{M()nq{TV`Dq+lv<>() zoI-X}&&SCgwks47xg^|x?&`w2ov;=zoqmQFo4|BXeM9_!pd-h_9q5GdRpsy~#H=71o`p04*nFDo{zAx-h;Kw6jj zi#u{>6dzM33gvdtvv(5CDq==4RsY84fa1goREADE!7n8)f3{|ENGcaA@O)=wJiy== zTwZk4;M4#U>%S$Da;KXp!5iVVg`x7MZDGW`_oZLch_iFWbEFk4yLa+E|N4*@7}NL?vUD$}rl_QY^S+#@5@^4*d;6>qt2 z-)x3n)T{~G=3{2=&e6voHmpFsBzh%qy&=Z;>bV4Pk-#om%J-Z)Kl?K0)P@+-if$S} zEv4W1Vlt}~5Xm*;?Pls-;9huu$aVfauYZAK5}D!uG6EooeHHe_atQR_dIavYf+Sm7vOP*s%5^NC?;DF-0+?R=V@l>YLoh91@NS$vE94Qk?%*3mfB-%|dG-Y>;Aj z;^X6^DXaPXgdq+{udC8>&}D~WeV0h2?U1jn)J=w^iFe?%a9wx1OFLB4dDpQc9$|`sSzH>pvsaSUEzul4K zeC`?At}_kM@Qs)aMzhY9yJLv94u!{{*m~2;i zCc>8^m`DbqvT(-P75jgHagCHwJ5gSA;Ql)+AzWqqo#xkufZA3tYLFj#^#EzBS^e^k zm=1R0<#{M*6~NNtwH~vt`2VBO%;DLhg}~zt6+%2V*v|fxp)wBse^)&PH3o|3OcnwD$(>F#^*bcz4E zcK-J(G(AIdvdvv#T2wAZEISyS&n+_CKxBbVD{*3397gB$F)d)-7k1zMhlp}q>}jhI z#uZq}@3=w6moD=&%|xSOi)U+VG83K}3Jwdu(-{RPkgpv0#Y&gk1HY6(-T38(BPLQv zAhiT9q&=<*`V^G;FVO!2WWD&BQnfnJSjcZa@kPFX=NVn=s;qS#JhtOoTQHWB4MvM4 zKih>Fw_yL3{Q$+FJS|ZeHc@w!@7?oPj!$S8BPDfSP5K&}S-{^!f;czcQxabJ398AJ zQCWXNaRUut!Y~ppcEm`R zQwx4rW`e(rh|;VF1q%-V_85Q|N{~(wck9Yk6A+jrhK}B$wQexpef@t}d+&Iz+wgrj z`Y4<1S%?Uc5ZNn9g~-ev$zEC6va)AZl1h}l_sqzM?7g!$84-Txo4&vAc*g5_p4anw z_0QeyzP;bq`?}8SJdg7@j#CEMdQYC(eFRchhFW`b*cH|7Y+x}xptW<8|AVk#pqBkL zm}fvlO3DP)&8@koz_I4_kY61elT20gdYWYofzbJONzokZ;XTk5NkW{4Rpuv z?}S|p;qJ2C-_S#tjfnOLT37R$yiLn-ao*rgc+3(9(?80Pu8!%6!Abz1&>1@QM_re56+!@#)-S5TE$GUW20K5?iz-$|^ISc{L>?jE_!~6*@DS1z_H2Q#GhX~v& zA#WGS^ACrs?A8~M-i>6_Pcvxq^t&XK8=05?~5kE9`kKxq`eZ&9kKcgkj|rJowpPC z33LO8&M3uf)Vh^X?yVZqC|d+UWKYsztaTsWvY8s2OBCHNNcNx5eRh!rFWD#&w*$<% zH8YUM<)GH8lX`$)^Fc~Cpg0foNUSYLuAJ-b5If4%xZVwVgE3`Q9RW}N&R=JM?QRn7 z({Rt4vwEZkH53Z9m>dxAhfGRVcvZ0*cZWGEe!P*$8*>QWK1DbBZ}3Yd@s!JG{l@0B_B7N$(I!#Z)3 zgS8SPf|!+NE2akK*ZR4v0J!iSF2U4HN7z=Tz4v2SV}M5_D9zUiLNyh)=MfYV{QKbS zpiS3+pc9C5K7vjh_^9D=(YQgcbGY${Co^dTih*AL!zHNsaprJT`#^^i4ulAt2E!z1 zYSR<;!HLZX3RX4#97m8@!K+j{+9Xn6#hV#4%UUI$y;lZ!bLPoY;~kZ z%=aUuUMFCD(0V4wZ2VYh;$E;0X2-L=ZV*I7LMGh`H@H2o2@E;_zany2`SV?r=mJZ% z7!D6r2GWTsDNT5M1*r!A*a<%YBQ?aL*^dcv>fnTN3&nQ4_7mo*AwtvlXWG!fLjDUvp#6$B z`c!hz@6h%d(vg9y)Z_19AnGsuS*DxPd>hus0a-gs!Dq>TfQsCyJ!JElByhFV>@7DE z#-Q!sJ6+0cNPsM)uFGkLdE+-D38YsK5PvEIv4B)UDFa!k5+gu=L|JmlxD-?=OT$l7 zC}Q-+uyOPQYTk8&eu)uul()W~wnc7d+czaC;KsHF^x(AV%)i)M)4Hxvn4W*335kf; zRfkQwKiOon=34s^CUWYC#JbF(s|`y?O@Mlc)$bgxBg{kCjG~WRMn&v{e@G92n}~aF z{wHbrOn1FYA!P#0rMZ`H83wRQztvU4g61UvY&LjaSI`BBGCu&lVu9gFg+X9m-bHA< zt6Kb#*FD1!{wo%}>qoYX1o5PEokA7s@9pR1QwGaji1E2W?1o?QofrOHxaT_RL&w|b z^d4p=<%B|{U1FzY7xwzYmG8%99J=q$5_BP;d`j!?bw$DZ)&=n4mek4QVrvzfs|OQ% zl3qWM^zLJ2zi+>Fm!;oA-=)fSHt^L`!%|IYR|7!oP3t3_pq7%wb%3hu($ztlL<){* zy+!I+pW~mPTQFVb@0b&T-pW;E*Z`|H6K{WjTFY8D)OFFydDZhAp|AzhVglTb4(yfj zxO^_+QoK(rb@L6!P0>t)9isvb_z$2*;Z`DHc*mXjH6Hz}Q+d8ErYZu)-JKGHh?&OW z4#XsfY^~miIjaO8yRm;H$~xha@3i&sqwLc}*;^6{73;|#y>I`8*TLE^w$=R1AfoIYoO*@Kd1m=f8(9o_bE zO(N7OgRJa8v5H~M8fc>-x8yh9gvfSLs$JHpc2y^>qbdWh>$wq*?6gZWlsXInKd?tr zh_#*Wo?^6B^^LNOgoI~#8Xf--7LpAidD{MkA*YuB@m4zDjuY_Jr#2@_JEhIhUxVwu zfw~qzMX1EoPA@G(1L8n1|KLE}pD%!Wcm@$S(y$^Vo|>k>nr4=ISdcNN3mHKgh%-Jk zxq*5QF{WU(I&M~Hxh&+A`+Id{K(mo)-8@SWM1#%{IkcAkC~o|jPtMqHF8}eb+=e$i zhWSHV;1u{pX!@9;DM!$;JN=wqKfCrf3CcIEbA}G`A?|d?Q1FjxHdEFD!MuT19rh^0vQJ!fzzz-?&pkr?o&(8U|L)t=5uj8sV`Sj!CS6q-8Y@ko2}{S?q+}?RBXk}qH2KJB;OaXv z2bFk+^U@#FHqGC!_uB-@hy$mlsW#*7k!$%Ej6?hhhaYa{d^$@WB7H`)<2l)9LDr?Y zei*>Y+zbVAakfEbp?O9m{FrB7eF7!{eBZ(3h~azKUm) z4^>f4oaCqBQz4NL6afpN9GFH(zo49nofu=w1JU zYM5iTn&jt)L2?bRoD2jQ^D=I`e4p;x*-pMD!H!@9%*X*Y;max_g9AKF(Bb#5+#;fa z%nQq+xl9{g4JCP+c=W;BAPJ;qnQaSTL%owQ5KPfBZ0Dh_O`uw-Oh81CT3JU6d5RO{ zNr-Y;99PHAo6W5Ks-zz-5NsLjaX3C$jg_ZvLDPl4x7f6=HFu?N+jqwjxXBn2IS#aV zbt5!({MS;D#0+uX8jI0b2K!TXNUaoIa)ItIp7}fp=9994Ye*DeI13Cjee41=?x5!g zaTzz%6~7FyItC8ei*&gl{fji>Ri`|__xjJ~87I#GFhSiZkZy{_5lzOEf2KC`G6Ldl z?KNv`WYfZ*a8FP^%GD-zrb4~z!*?37j$d9SF=zmT8D%&&=D7GNL^kGot0T;iLGiIa zP09+fKX2tb4N^obpM~^vmOhdXH`$H3s)tAm*Z9=90+B?4AFs_g>9X7J$4r_18LIMx z27_-aUq22$fB|9QkU;f~@-zvJdVgCgf8O=Ma=8)&u-^QQGI5;%6`~j%G>PG+CHh@m zh3IcwI)i!d(`BFW)HUzt3qv%bMJx`!aSYjo!g(ySddrKPNBE-_U%NDmW4IG|mnb6V zJ&#$}_{@{*BrXl#5|MY+)j7SitF~2Gz-G7z={Hf(Os_&R*+6ri4>DiHHL0nY}pLa#T&)Bv@}?V@lMuIWXCM6lwQ20z_0>~`xFa}r zpdX*bonsNBuVr19Ck*1Im#> zFBCA;z}k(Z=x;zkambo8?%S`AYe9=+&cPdKF#vfJm8wX4KUfBIxolOQ+)t53dPOJT zn}GO3e_yMIEz~J+$oml_qOTG01uhNrqi|ap}&#Drv`W@ixnLW`dwN9~JwjkM$ z2SPP7zi7fDl8as081zB?J>$n6p}s$ZuT{(dp;u+ZD5C{JZ=ZurKK->^gXNd%6{dNa zLC1QhG80{5nPh~Xbes+prSxBH3XFr_FbX}e8_^nDIuH<}_TA}SbT}8JuwtHs&DR!0 zK5;0ItAAmAtU@)A#O7qGo;rnM+5qas+1=F<(+I#&-hh|xOU;{^29Ww^FEuy@PlJrE z#S`s(@Si%ZV&+SZ(+#iD*>sae8(E}j^h+nCy&G?{m&`=&u1|0R?rE3%5}Xl|XS!2O zAEfYiU{a+|jfm6uBGxBW7;+~jG- zE6fr{Y?mOaw9Q|Db1FW%luXIxcWIqp{+&?dwMSGtQ3Cd+$OC}uRZ{#YjW;u# zjO(lKk^W~u8qwDr%`~epP7<6oF$jbakz+1301gf#T!-zk5_xXBt)bk$3Z750`eF99 z8UDx(v~3;o8$`(hHtlhOnFpo=7L3kYWC{pR1@bEDy?8_>=~YTt>VA}atPgXFk2lj8E^f$!g3-iZWmdZ>^S0 zls~Lrxi&QW?1fi}1lK6zH?3PlnbA|@Wx!ur(l|MNUS!Jer5t1?>`m!JK4M~O}7mc?W|w@}Vn7O0V6=TLa>nNOAt*aKpu6A2?f z=1rFlz?m-ssLO3dC)IZX{?y&&XD=RODlG#YPnH;_D7bi?jJk<2Y%MQ!bn6C19!x^!|A zG08FJ&Sh-MXkCB2bPQw6*aL{6Fwec=1sg&-`EJk$j2ThGlX2Q6fhD0KHL9A8uQnD% z@F6o+G3QT}2lRAES?Rl$p@55{qgI-r1g*AcQfp|?|7gT_hUgklW53iJ0qn}JyZgK5 z(2R&ljiMfl+Ge|AJX?r zAq1UXwuCaYQTPX*0YM`c2B@1Fagn!Hy`OTX`9P@FWsl6lMRsJ(oC9v5=v`h?m!yo_IaKtHNG@6s#qKtw^j z{5Oqx3okSRe-&FRBDWV|&FNvTbOZWIC>gujPiumZAt({#33CRNwp3X9kE#HId{-U? zI+HK^jf7eNh<#dWDp3d)52F>lM#}m`;X_Uu7BYGUvEjH5eW(2HyIPJCTYQKt4_y1&|FlK zU>wCu3qI~_1yGa`>Y&RPDS1t!rKF|53Iu$_VY}SGu^T;EPeGQMi}Fh)80F#vH>jp- zxnEZSA2S~*{*i(|%67GyLz5Yq;34a3(e=UiD{+)=NKxH18AnD0`0Xi~OjK93HvjH~ zYra>~Vl|tRAU_$Xt0jH(gD|{DZR7ngO%3I;Pn&Uj4n*W7aVKbcpj4EtX+ti~Lz~fl zfmpMS9IL-FH`lK($F4p>_YQ_?UoKfc!34d=x5zu6Q8wj3>}kc;c&aZr;LEQCTv=zh zZelVv0WY|t`j||w`j-8o4~F)sAVfdMI*$ko<#X1C!Ib-Ru$(gs-R~D8lxtJ#AvIVY zIh$p(fCqDEIc@Kar(VL5fsA4@KK2}+@Vhq;Q~2MBG5^$CVI&Eoz9Uv_b*!}dWaQUml6wb|fqJ<8dAy{M;=G@7Wp<9O;ogpT>R5xX@$v7-E{RJdb_ zWI$~d?YJgw#RPYB0c!l+xu3X^Nh+LTdNj=LO;u=FP?k$g1lq=!A6Oie|Rl<6ya9w1kZ~F zg+;KXG!5m{`On7B-_?Xw=t~0f8>?F^FitL;!ap9toQ}52K;m)7IXF{ot716OABey| zo{nt?ym@~+C+_vS%4ChX?8Q%^-&8_7<9fPB9>ASD#l)ggK#SI()+A}cSlT6} zX>6qk+!UjRfJO2&Hy^NiJL!_%in z@W3fyw*EqLFCd8D2fk^@{j2Ugmp{44BBdJ$Wz{D8sTZM84+qe?Rf%}Kfbz6kP(0LR zQ(k=S6ox!s0M5jl$m}dI?b!nR+6**8B6Mw@N<5Dg;~g0Mgng{SFttAMue-HGb)vv> zTLDB^h~LRJXv=318UW2?o=GMulbs}4*iKd&Z}DoHJgw?-z=|LkHnC7IC#Zq>4K)2^ zbdQEpZ<+kPXKLD6(<^>CYrBv%ZP7svMEd%A^hh-|m<)}ZK@Wm5^gYz%%K|8DZ09>@ zmFc13gw6&PD0Rkudq7n1cPi*x#R3Vc1^66Bzyb4CtCN8M_y+2BoXo)ZdjHezpMU2& zTc&=*riLW&e1YnkkG{`+&z~8@OGpi+({%81vK9>iKMyJbTA>zu%#a86Fi=MIO?1NN zY=KsX@}NJEGqBumO2aQPNFrbrTM@DS6<`S3$s%4p@G7qfYz(fMS||Xe5{G^Jj_@py z>F5z%>1C-8A3pR>xCu`H#Ql=AxW3He_v@XRd9W=30XNys&{aTfJCr{fM5Tr!vZ81~ z^2u#TuKo|bGv`M(+0;EdP$NCLvQJcZotuuKo$91{rp_^U@~|J%R=?8zvMTY z2S=YAhi-f~@tm)IO;=tD`Kbd)pXqRZ)vq6_--F0DRcCwh*4yTGGI~AnsB0MU-Z^s} zUznFUz3)-zIv!l&Z`k+Mcm7KM61%Q+T+N(DhfAKzas59HZOp*Z1=no#qA{@=dce>= z%rT@OsQyufK=Q(W8_!$J`}luC#Hg&wU->7di+-F1v>$wtJthCBrg0W_%p{ULKIu4h z6p+_z$jac-g!raudW0Ip;qF zY8231)?7zQ{mvZ$dFTOzA3!VPfyn$FEQc0}T{5kpMJM~#et@sRx*@130W3b36q$e* z+6|L4jnMF8r`{=o%Q~*ZK^uyq0xnV8rQYlQpCPS`f*Cvl(VO*xfcR9-G-y;4zgJih zTzn$i0chPh^T^2OqmXOA;J%2C~83C$oXd)-Geb`casP=FcJLmnW;2oQr0v zIG5FEkra2Ga2}|3V(9L`u)!6r%ZslEK00gz3u9wk^gQwpR_0j!pFpq^0g&#OultEuNGFfNLy(@x zC2{|b{V5(&0O?YXz?1o@{^8cWTNAd2VP@jFZ|4nl1 zz16oHlouE_0HJ(qRi^n3eIM5PlA3sP^acic!#injnRk3v+N7}>E=pV_htolvp8(36 zW0Sy|_ z`YJzi_N-qsTkJ=SZE#jV-%O;5QV70@Z5^LkL(&J%S&+y*$$V9xzuTv(qr9U#ixbOe>o`d(Mx-UaD7Hn8p zr-;q?(jdh@bwa-@eVMEUnev%EC5%F69W?qLE)MR;oIA7mpiHH*2OcvMlr*7TKemB; zu6#vc6vM}QgHxxvQY>Hsv>CWpL^LN4)R_bN2)}jhL5ia_Dlhrn0t^hZvqx2dou=t5 z=#{Yqt`98h%!E<)t)R*idm^8BJB!2Ci5I64X>0`w;`zxecBe)GR}n`|zw;UR${{XC zk}ld#_khFU_*SjW5qJ}B#!Y0q%p1E;b7XoKqq;2B*;=S0)`vaB&qNu2?@e|$Zr@s1 z9hmL*-Bz4$Y3knhbva=UvJ(5Bu3-NE%^nmyJKXj{xdBDpNIH$_haI6eWNLLzKgytv zcj&4fE`N15I5FY2k|nOzK`U|oI8EG zWMQ8@O!gEfm81wx>H(4dHCS^V_INNxbo0Vo;fRLRJ5CskRGzZ&D;VBJDtXSY!hHS(ut=C9mu&+Y5TbDP3#TKwc{gun?H#Duj@L5c%h=H%@h0~s zVkopRXN@pW(9Tv4?Oq%HeLCs0kRdH4GPc1;GEr(H*+?B47^vtbK_NBh8`lkkBN!J5 zuv9XvBQm2nCGsqZM-YoMno)*tv$v+Hp`{H1zXl|W(9gBgatooTn;UEZvHB&ogC?dF z(ceL+*kOUtg0fEJW&}px$3WftA-oJK`4xpQ-rDF(#(G)MRWPD7VbTX_t(G!1zx8`u z3hT}U*%CYV|IvaBQ3XO5Fmb?OU1&yx+Y9)QoHuEPg4ke%vLC7uRiiBLx!^ATUaG3e z`DadnGtp>k1n-QMP~kR@7Zvetkf z7eABNh}X5r-LJiI2D*`jb^5;Z(SHGC7e3N$6~`61;09sLNC13E1KMPOjr1bmH8pMu z#f;eRK8f3N0wB2}5o82D7>vQ}&e0L|7w8}-6hfLcB%GkxL}sWX7@s~15R8VNWAlwb z6P7PKf~z9k&+K`_e#rH@BZqfF&cP!v&0g9A(RhaC*!_>_nH4{#dD)}&zL?8B+v~M{fVas9S;QQx<#s?( zCAdY}LsIG>yYHC&oGcLB2R=%f%nMY83L;Bp{QdGMk_^4Wd2n75Y-HrWhB6%DM(8uO zUHII?|2rRi=O}r9jn7A4N_M?r(kCx@L9#}|B%+o>#A5+dj0v&(}#PvaYDzHe=T!Kp= zU8CqL-S|yK7z8m3mm+$}cLDm(occ!+xYD-Fg9mIR6P33jE0i7hIF|OWl1RGxVP+9aje|Y^0$6}f6+43h&h*{O>fu}DZVboya!_iNmP)%m zOR7FtVsi#F%ipQrmJv>P0Ck8DMN2|WS>zA{4M(P)*XkpQ${7ImGQM(^MSoUU!D}fU zn2INN^joPDh9kIEmdkS!m3fUMN4r|#v)gUOe`O+OJHrsp>EeXfih5iLuRB_E=6Y$h zKOcF+CEF>933MopKk8%>&7%s>ofA8EUrPK5f{G*nD)KGiuwET}R8eO|(nWtx7bY~v zGYm~opgPw1Dk2|fsSDw0XhD1>U&YTXeXxd-5?|#z%eFF$-=u(EJt|(rtsLr>3t9~c z=+{Xkal--vZZ-ioMKSDz2k=uRSNk3gDH9Q0rj8Xnl0 z6+3DH9)DqMP8seVO!R1hUh=f!+5@y3fo>_J`{hF0=$+BR<3PiYtHG*c)a6atk4>(Q zOg8qQ#|VlJ*FMtv_5>A$ad^r`-)FQ2TIgbb^=qnI`BsIh{zRM0eO?kau4}iO5&RA< z2+5y=EOFEB0&&Mm zazLj}0X|m2LhvEL{I}Pfqr5Pj!HkRqVAxcF8-ZHks8sN=RLITmc@Fv4A2Y&-=qkj4 zHE)GfRM_L0Ni#&)0m7thPc5)QA~EJ2uggh^ZK*i(voZK%sj@rG)Ak||cu4s!o@&X= zkmWI3!vW7)Mnami<)Nuwi9wB?ou0LYeeF4D+-SovP-Ucy;yULe+mJU_VwFt7sXdx& zZeM)6AFq-$&~bH`iIDE&hYwGb0_t<$Hts)D{aHSt+sLxp|AAtfhdtRIF`J6X)qtXE z!AbYNecsERYtme-x1awMgGRine=(O--D!l2AODb0&*AE zR~L6>K0eDz9as7>ZBLpJ%Sl*z81`L9CZXv0y3LAI*F^bF2YFPpC6G{0kt0*&te6Vc z{{dVA6?K|?UW~d0+YOk#h;2_qP>m|IDS?Lf2I3?2H?c}uz;@#dCZDxL-fiS`FdkaU zQZQd5II2~o><5ce1?)z^Xoxe_MaMl8=YbFU7{R9<%Aw&8EcQ5=Y1knSl zbYW=YTacZSHkX2rMt7yN-M8}LvG>5=jYfPiOEJ)d;EHRq*SrNEQKIv(80t;q#)Rri z5Mrr0`*%aY*i`@2F1&j6su_YOMR{s<06$^(@6mK7f*;YTpdS!ix-#Ar7)T25Tki>P zxL-?;dpqhuE*{*@;LTiX9uM`5G@Dj=;nWrd$7{Vunx)oycRY?O6CNTM*;{hEtyv|H3H@<>r}=BQQ?7RkUD3_^Di1LIp-)hZ8?ZkH z@9AdBAq6oS<9kt2jU#nZ-|2If0u`r+ob8`6v}{Fv8S>SF;GHs$rv%lOjcX7>^{HvS zSIYW-_GUR%h{f9{5x+1nwi}ger%u@i3^Ec%+#^Q*OCO;?2CZW?DTnzMRwD0h2b-=F|rtZvZ*{ zt^&8vws#<(xT9POzqpmhPjdMb{)GVOW55$P$cTBk7akyP*IbtLR`56Bo>l)`k=NaR zt-yz?yb3g$43GU?OWw@8=(~tJ;NQ$kaYsO71KMn%l$6wWC?K^ifH_~Y5jf~^%$C=#bdA0H5$v0)q&*KJL|V8%U-Y!HHps8TxA?jg*!^xdg8Ax3~^){QLwz z-BeTqrccL_+gY@oLK9AT#e48AT{;7K-4OiN@fq+C;9Ghz{O@lmw#t8aYBn46?_26q ze_KSprSHFQ$r5Co#z@HU?^_O?(*Y{!Ajnbwz$gc;3w#y#9m*)RCZXWtYq5?EMzI-{ zIs#?Ay1f6Sw$C0Q#91Jq|Gc}z3k~M~kgngMdvi;lRj~Q_J6^Llg{FP7FM{>PYLgV; zh-uEUYCKh5E(bAD5Dp2;`%GV`%udHCz=VU2vV`(uka>RshiY5kM%#z{Uv*5ic!7IB z_MjHWc@Bf-y5L_VZXL5SGNK=_&z_7Bb!XdUF@{@X4bB44{MOpzUYRY_AUDs9>@DJ~ zcwzwq0*`RSFn%BBpA*`?`qi85(3;bm!og9sYlTZIA06=sE*qp@N9x@YNak$!*0lOW zz`h%aAVyzs9z0XKfh5X^@O14eXxSA$LFbt3o|>ZB-bkDzSZ$ze{J3)9#$gS5UiwQK zctDAol}?Xg8c0euxZjt0m9_`C;i``Qf9rWl&Me?yH+D#Q+x_6?p(|i39cS{{>_da~A^(-4c|I(Y&|Vy*JOd z83uHMF8XWe@x}k!CMB)*brQiz%wdeq%J(UcHfWVt#ii0 z3}=e0Rm$E$hh05KqM>8Fn1k`NJ-%f5_LY_Rc)M!jq0B$?Z(oUxV)Orb7n^7F(a?N| z@oo@lY(f!H{VtlgjoRgRQZTKjl5O%(yn)lIYQ(>n1`V}>vp>GQ@B7M)Tb9AP9KG7Z z(Y2pntEGM0V}I%*vDDIwZ-|LC(6JtX*sfmtFH9AS1hHSjt@6+{Ad9vSH%`fQUk|oN zWrU*oucz7z%z<5iz_059UyGpxy#;p_yU~l6Mp`yie)sCg(@_RwA)dxGX$UMFr%vMT_RqEGCRhfQ0ROmY3bNcM-CcyR2IQ1`I%uaI!USMK1rbhUyG^SdY zag$9OTM9Jq>6`pQq;}}lFekI-#$5TE3ovMi5RG|ayD8*I?KfNAW4pz}!40}1L7Ra0 z`#u;hm@t+4A=bV-;bz-N+>#2_t8?h|DA#r8zxDJeLjOwvo}TLyw8^|%_>NG`?|CDv0fCY`DN!oZJun(mt#ho2#|dEND1Lt2)Qp%_XtFFR8` zT#g8Lt1ir#J9E~$Xpbk_HaZjDTT88Bf( z-(;Ko<#Jiv=5O`d^E)uMagyB0xK|@m&7J} zG4?ZUmK@$LTB2zg{Q1E*!4uQFSYxkcEI5L&DiN{s^{;3J*@^9Bg6P$qRP$Odd|#|z za&P@TXu^Brwcj~$8=mMg|EDN;tWkY2VVD%?j(Y3bd6gwJd4Tc;GRsl|JNRVX8$q>2cL9X;EMJ#p3N{qF0}kcINKxBP6_Y*K=WWC4Q6=En@2S(}x9 zh0ObXi{6v=Fnn+PyJq@vpJTIzSwCrqdcalW!{TDn2sw$ZRS4UaFW$Ql68`e_BmMWv zJZNZp$O9j_N#V&q0@Ejq?hkiyfO{KICInpdt#x0YQ1>H z74bK_q_wA30_^Ae8X5@;>H5WkgqVLbZAP@hWGnDn^15znOku-TRDB`YuHyZk|5?Qw zKYCBki?ADnf8KwBj8hFT@?r{_Zwcz$<$l!rCCwh&^fofCkqqNSt4R{RVhR1X@i(k} zrcL_XCdBC3#FGVn%jG`UvnjQR6EQL=49llL=Qe1cp8c2xMw_vRENGON5G$&;&esZ_ z{;J*F@T%>kBEWOirs6T5o9b7vFTo-tsmQvgpCN*3%-a-4vP|t* z=0kOTrc%Mt= z{ovVtgNAe;$aKhT?g*fA-FdD{qjrbZ2$3!(<^VbRf=Ht~E+)#o-|1OH%_oUeR<56{ z36o^7LM6YY%==%x=f8azqXH|$2P;(7CErVgtdP1FyjY8HiMHPGz+ts`x4;*r_Tm^= zNHJHO_P56smD|Gw7p8XhYKUnU8~x6qN03hzNdF2cdj&3cFy0ejyl5b5#xH)}ae*uw z)N}&JIUx6n>uOj_=PLSchOE!Wj(KecVapa83-YQ8J7(m=37}trQLPW-{{}JkS}`77 z2n+6bREC3&N#S*=Q-t9GdFOn=g`A7ac8p~2z9NUL&?a+;2SHl;&y0JQNrN!f9T?tHnBXMc}csa7lJwkT8Cg$&&lWRlO zHzc!5%hNQs_LgmJuHA*;@B+jA&%{Yf?3J2t#vwhvzg$s-)tZ-K@W})opk5NcoV!S$&C-@nybNN)_}D;@2{B|om9Ko8tr+sHpV(S8VP?Pfi<`O ziaw#v&FpIRrakad?|lqTJkA^xWF z%9jFYy&7?zwLSQDA?^Y^gu_A18+aGYWL_syDM{G4RIQ`mZlG&pJsO?93*o$W1YO%1 z!ujs*a`5m~csFKJwVeS)=i@Kc;CQ5GGO?&xR@8ZwS=b-h+MeX7p*+sz>D~=86RCUb*LV=lv>)k}(SfrqNN2mzd`+ za1d?CKA&c|_0{_v`em|>R=9KX{B;bU&?0+>?dERK-mJ$=HBaNZ+x~TvVTP8qFVV>5 zr+NLRhg?GCZT;LYBdd;kv7fj*ZBD=bCPVmb`wx|e2RC~(;mAD5>5)U}Kc(9!knYqb zkWK5*GN`C4?BIeR(!5ZXcOH3ln)g$0FzEY0f8j}0SYR{BLL9gg+ee9!d)=~sR~ex-PICU4pEU-F8A@S6NEVBX+j zV@{H_Eqg-V`c{?F=}gEkSN78JIFPR1harmZ1da;jB!58Q!gP8pmX_H6h0fl^=_ih& zXxrSiJ_ zQ3S1lMnS0$25MUD`NkHju@$ePrqw13!p5VcHz#1Ph3cH=p@mz2!>#{0Y{${P@7#Gm z8HXqJVeQEs@s-=`(3mBogsgDMkrZE!;WWG1yf5INhJ-06(zrWr`4FrXdj87IzleEP zB1(}%Qb>)45@Ox>Q!^1c;eMbZ`-b%f1kLEcn6EDjnVwL44U!C2np6o{-Ehn^o5x>t zU1RE=dE;VGkWz>Z2TEl!L;hY{r)V(mYQ?r!bE;8dDA$Mz)f_%Nk>hX4%zph}5W#w~ z7j67|Gh-&5xRY=n(O3zQ!sC<@TW<-6z2{%kw&bBewkGGsP#hoQ?7YpK4-Q9}Y<6{Z z>`~P9*RVMJEE^;HRAqp&Le-wX&gA8BnrILgnVX(c42F2Z_)Y77rhCzZ;W|1IL&6u zhR>&+X1>7hYDn^oT6ab@+U(hWtV1rFHI#P#@rM@>jpsgPSS>J-Yq~10afq29mrX1B z_WY+7v#S>pE42HL$jTPB%syzIw}b#*c4>clret>D#LYl%(QQJ^ByC_=bdy0`$b&*->?2hM^03TKdL^-W6BFsl8O+U10%g zoQa(CpVgdCGF|#*carjvcj`sq(+aWdjk~9o=`^+~&u$t|gags(-sw;h+QMFvKLd%K zhd3fT^%aU=YR?u6H12h03c%6;W&jB;2V

+Kd-cWY$1v8d zjm5|Bz6g@uMeZVs(RGKRLcJuD70L6-u@~?X8}EHHNi~DqN2`aFOZ}4fbli#xm9LnL z7g%1jfRh^1FzU`Txc3FLjNkKxE;*wyznTudLCs{aEqz_1(g*f9VR7D{Q1z#SL_)~U zr6cS?ul8Ma3#r_jO8aCI#5f^Yja&kun1BDysQp=r>N<82J!p`DizniQ3#y+bDj62__SXtf^JiP*zu3DuvE<;VL&l_ zpu)zPWp`BLX5U01nODuxsueQf?|c_Bz(&BHl?5N0pOR$$^AM`Btn0(+KKzLOfUGl$ zJb88AYOIKFC&I4ccnPo;^-Wcte4WP!mTYx`R zTQJ?YaYKY^ty_hNq@vIuoeX}Jrd-zegmg7^N zjM^$>C;i!fHDK7C6JH~CL%BS7bZ3(AOH_COE_x_jE`rim$oa0Z?7H)DJ}rK@1l{Kf zdc-mZ$m-vD2Q;q=l=A@BySOv;{T`g9u|9uPv=5NVKq||L)u1`Sj=laDseD+Qc5yQ(5;`HQ`JxBt)H_K zSF)7R7P8o%w~e7#En(EmwjT4d!6w}k8yO8^3myd*X!2@CfQU;e;##WdH08T1-#QZs z3egyl>O6q7{a0o|MUB?LT1vh*4m#>&P)oTb6x+Bq~v!= zNGFv$Hk@U;FpL*BrggGBosL(Cl{g*BCT+{I=e3+&nK}GxzaTe!0=2QTQZ3|mi7!r$ z);-!D@s0h;>4u>zY;ah2M8_)}a|{g)ze8W`uH?{4VknC{*a90()_M&k2e!_@J^*m@ zX?qnS8HCwHYERavbNJ|498^19X5m^I)XJeg{bHrklbwa4jEx7rt<%2D{bF4ajpYt` zp~rg2JtKEdcenisF(GH1V2=kS?NCqS%P^Cq-WHEwI7I1+1Y%#Gpsz^Ez{r-cLp=d^|EZRYVD>FXaaQ`4=0(zwD#ti>|xk z%P`-CiV_5N7cH63HDtQLPu_dok_1W|gB+=9xsUhoVq}%SKB&Og`lJyu98r_< zU~LliA(Q53sxquVw;ku)U{qh0dbxdvs`Z-1Z2`xf;w}Y~Jdv_}SbUsWA$l zkBNU($+Y*Vf|TA0NCn{+4kYdhHZ>OjNTI&ayqZg_hUx zMB%-d7klR>eDCQ8bfd;0`i&1{&My=6CX|>n7cJio&!1(}FiVUHM6#?!Kr6rq>c)~_u=3D(4a$i&P_r@t61#UL=r_K&%_rXi z?d13T8U8(C02}r2F6}(H=(6Kx?(|>RgODZHuGt};cgB&Mwn*-Nl?#lFNIN<_z!|gc z=d{@&FQcMqJ~&y}r-n^n!=7uu{EQgas;GX-qW`$={6%lm=z@B^q}@F2SJ&nU9?Hgq zuIl)p?oQG{0Y&n&lZrb{K3^g^K3{hn_m1m=_0N0+rpoU(4zBH_7RRxL%2Z!kOj7SC zIO306(9Xr@F0YES0Oskg+==i0nb2QkKT_b+NhKc7zrcVbvi5%Gl{bAGtDlyb_lOVM zuP|7QIFe~Sz1@GCS&}$M|lL>AJ;bpNsNhfq- zX{$aKHvu4tU9Sj02&T*1nSSg4gpcAA}-m5AV~zXzUaou7v9s* z&5c_|q??Gq5qvjZ7g~;K$dz=^Qxjj52l)P;0?kFvn+XAMZ(w;$4n*$%FYmUusV8ER`5AK=a1d9@6}Z< zqiTFheZ{k$YDk?LO`L$vdb5Vh?1h z>nD$sW5rChh2$gfXe&5=M9MW)bm0e;cRx(c^ZBqhY8UnwnyGg@0D0B2ldc- z30j337}eNTbG*Z;4lwtETY(joxsKfr-YwwT%|5r=&G!nMaGs}ih8y6~Agz-Sz;(8} zL&j|gh6h}ZH%eH6yo2jKX<;@$MLE7#FQ#vJ9B(JzG9SEo9sH|y)0}CfPwU6&o3!7Q zAn^c<>k_}~ez^HW0zKp=5vwD>oLrkHgm2$4Om(*%+GED<=^RnjN8gJgU{`*9fEP zA1-JNylW+UHEp=V?QUyy7M)SnmxZ?I4BgeI6FW9#?d$ueh&28C8kva#m)IThKmD;g zZdSF`mkollJ&&i`T;~cz)@cQtSWNjk9i>ISs8_uEgYmN0vNc4LUZK+I{&ns8gZygp zp7rLv9T)wphc)32cMZ}}=xC5TqfyvYQnRz~+C0hB?8`LD{1}`ukU1l0zFctuquhSQ z)hjwjVW|I31p6<;=R_?IcejSqs7aQ;YvcLz((G82>wr(dgygo?x4|He^6$Y69a~ay z7d;L~CLV@UtNj|#@rZ#2x9G;t`+N4$nSPzstAIiB(dVsiwp5#m34rv^Oy!%cc+ir} zQ0;8?3)Ciu8nS~`3^}{ z-m^G$sxInf&gLsr`;|NQo-;ALyuuL*XHrUuZt&!umb)@R7iaN4__UB6GA{8xH_tM9 zYySB&hjMx5y7l*Ux0l+c@0JB?4?OJ5u;|kG;2fomxPRRf9eb~~o}naC1I@o<*5_hh zQl3pBT=Mvc>}mt53rh=6lAdWuY9^Jdr!gJ_llJbtj)P*LNGWDlADB)^n*IEKyKJp| z*>E7cUG!J4NhI$|@1vZLXeN`Nue#LiPve}>5s44tpQK@hs-2_ z$bPM8YSEj%ow$48qbGQyZEr<<^FO*39t}Biyje>8U?Op9#_)Lrzp@q@Xze_mUd`xx z_JNDr9fI<=Zr%Fp&!+s95#%!Dc&he>#i8LUI9WATJ#+`nS3IJ@GV1_;cHe#b2VYwp zmq9*_Dvt2I6U<5QxV(f{ZQrvT10L|dy+lOR75=VOtjhu$s4#tq$Gm#VkNQ_TcPDV7 zzJ3Sa(^()N5aC6+J%v`-k0V*Ng0kEs&9kEzwMW2&Wa>z{5{BV@zkF;>;p*NMnjIdd-53780r{gK$=TTcE~!w`Xvf0 z*#1-A1*y8*-qX6Q;{tEU#W#_On2<6a`z;?M)|noef^g+e7`KU95wVxf1>Dxg(2SC^J%dnF!>Lw|NJ*~ygpt{f+8*J zI*e<%JHj44kN95u#W*zMMcsx*()b}%u?1ydaJF6FW)+@K=JSKB; zl4=K(cDIB?=*1LV>%D~$2m-07&U@+BOajjJDm3l&={jQ?6023eh`&H53_+-Q;=Bi~V_F{*xXja4{pe8b{I8X#lQPzLk@kk` zOtG4gRdLRLVRy&P^PJnYnDU-RJAWaL*S{z;n;?t6BGendXw0_l#bc4flZ??g&pM*-ZPIRNZGGGpWtL) zPQt9VN5QNKIc_+*TE|4&>g#L^kU27mlusW$KfTW*)^?<-@2=u0;$&rI6|*{V&+fK) zpKVoo+HQ&hmsihq{WiZ&@Xc)1z+k2Kd9lI<2IT2L`mVJ~q83L-^&ud_W%R$N)8W-l zsV2sZI(vl;VvC&$`G55BqpzL{ z`rjls_4t$*l$Os#Pm^;RUPrr zE%>PlYV=P#f_2nN38Cbt5~Dg5o49wcndB^XwcD0qktP?uegFL4Yk|fK6C$#FfO*@f z5!@L{uhnwsqXF1O(XN%b1Jccd5zF&S@& zw`Q`JI`8>nDYMn=%yeaobP%kWk)&Wt52V)EDK4qqj~HXP$Wy(Z+o6qeBQ>9ZoV{M& z3kVJLSrN*5wH;*Nra| z@UQ)v(tK=#IR0i_zHh~0Gx?%#GpPvaJy6d5^jge6HDZ}9wu+#wVaiE|hqZi>?h!EY z#sZRlryCzq^2kSfs#XG2ZSRHwzUYSJq&(t$h_2-O=+b=xE)i7 zXOg{qM%xxt@`10{+X8Q_W|!g`>yc;C$^pZUo$FUe9e9PU21#1)hg*4`4+tB1Dy?jJ-x74Z=7OVPcc+BZQM=tbrqq#5}6*+n=k)Gd-l0Mfpk zUVV1vDfCzw`OOPiIu%F}->Ijr+uP}DuoHe=Ez{KMPX603B4R=bnPx8I+=i*coH~!{ zdIORYV(w8qFCG<z20nRbdP-cD>?Db47~G|eFcU2yDT4>(mS<|M7*7djraj*p z53_jTnF%Z#wN}l#*h|;w5Y`cZFI2jP=?Wvm3)z@+fVOS2_Sqe!z@}T@`|lp!W6bTi%-69Yu$xXju8^oI1{uA zAe!UhNZgrk;p;ZKaLS~Vp;#k3NJ9RDzR|mD8^Lc9evK4qjD40&$KT+SIk40$=!o_G z9urw_XA^ea;@;;Wj`o!n;Ev>+@Ffi`^qgepTnhrMm9=YQiNGg@_Y&IqTefMz5K5d; z(WHW#!Bfjgkm=aVQlUR7w(LcH$(bB_t-R*n?-m~oL=q>MBz|{~gt9rpWjI?`8@rm# zu;C3v*E$eB45dGrL(tYgV&Lk&4{q*>VbUjEC%;i;Q_M#1LD)`Gi-x$M+?3(sKFDg; z1o87@gj0{^mCeEff`N^Ci##;xvt6BKUrt|HuE#dkjhRl|7k0A-0}ph`;(B4~jZo#- zqXNLb8$KdXRJ_=_T zhIP9>imis)#c9>(8+A~1shbpJ9iMXZGF1&Oe5AQ{Y^BWNO^)<*!1h(&gD{)dqt~;F z-Go-(hupkF?TT{Rv@=Wk)63?M`TC+AA#XOE?QyM1$VY}6P5Vo_C~`KtKcY};lizOw z)w?l1JD|qNAN{*8s0C6+@srr-)XsQ!hx7}5Qzz##Z)mfsg-aOHu{ zvx~W+nGq+ed4|<0jRt~3L$8qW*e>uN-`WPkoge_v{;Wr?-hg{xX5AhFYzEmw5jpC^;IzuO0@rtyAKCx@*K((9()-{p#Jkr{NikDBldBXR-9b>(c zv6*hLlN6V4mdn|vvv`UoRkz?Jt~ZLEi}f~*U5n#}<%*7W9 zg24uPZ8VqVwkFx^N7SV#fcdD+QT(iczuQ{*zPr)v)*wDuvH7gKx$Nj7K%Hr>zM+7d zzmA`~TH&?{)urkt9Bp3VMc^nOd+$ZQq@!_j5zsXo6xnYb_HAoZvB1RbeMzXwg%YNw zgW&7Ewv0ELhZe7c>X79NTyGUdE&XDV)NcFh+w7NO~3t6qT|U+S%G zks(=TPsZI!MyVffV22-n(FW1Ycb!}fi&7;BMR_&k<^3m#dWwuF%YpZvF~JpP6k4+H z%j~kA*m@#$Y>vh9VFJ{`ufgEUaw52+NA(rfyX$Vdv?^M5fxF*ee9AO_bB#imwi&^0 zN$PNkwWN+cu2V_8M8@lIqUZ9T$|M+DduR&G-j>7P*B=MmU)X1s5n&Q0$Y9_mrLvYudbL3ZO zIVU?NkPxAq3n3pbg=+z-2W(w`)zU^GX6CYVI9+XRZJC#KmS5)7)RfxiaZ3{-6rF17 zOLNX%%O^+^z)-N$4QVRHtE^%5@N6$Z?Uf`YaW};0saE-;QYX?`_7}k4`k?(zf012s zVR;%&ih?T)`Pb=`>+*J$&KFLQJ0XQ3qBcTx4A+4y7Gj-;nZJN3Q>{+lEt;xCx^=dv zJI82*AjY+3Wedisq$*~M^U?x;^*Ib+oxLmQi|pJ5&b52r)2l=qD&Vu>tK0+mEnns`S5;FaLL>xova z6eM%q{7UWn6)fgQ^z&FH#R#P@*we4g*B=c<5cKnl&BMI9QIb=v-Od>qr zNF@@&7S`=^DWo-72=H1=HawTb5sIyQTP}xyo(^zqP^<_>-;|8Vp?^p1HSvqPZLJH^ zl@J|O6Cur#TM|7JSZ5#Z&Yrxsy568O$j>*4l@!&9m_KZ*L_A#4#=~#xfsS=2WJW`U6FzieO1%<7%rEwn--MA1XdzeR7@C6H$$wEB zv;RVPb=^w!$=)U3wShEHB*D48f|rIyWNog`ggNPM9!snpBwM#1LUpwT==0qoRzo~{ zfqvQ_$|nlIL3(fIp@Jo9xoFCEfaKS7K-(vd+;(;^yBv!_PWZm94Yz+(;h^#;B! z!kuN_X>a*-D~>4e{UE6>Gcj^ADaSVF`Jk~I9Ss%s%ai?Z)AeH8R2GmKn7){%IQ4T4 zquD?(VZX-5z4Zh^og#-oco%xlI2blg`$BI`m7)K=q~Uv|vUrKbJh{gGASi!^PG9T7 ztm2s+n=+Mboe2A0wRtS(1o2NEXY z#y4HDkH4+@!gSG@g_qdj+VxBYLC&CBfsa4T$=GKAuo{K*v@Un@cR6jolM=fk6LOWx z^gG<4_fFRqa?K^A>X$Y2SdS^1S~uG(!h=%{h+TsyHN-jvmK)eSM673snbd!z=sX0z zn(yq7>};;kwY!*XIj_yo^W50f5Az&DSQL+wa$OxIYAZ9D^hRzT2{TG?t$FO)aaCK) z+$1_VCdiIgO*X~Aws8`~RR`*KEALaKbZFtitnAdRMeVN$JZjj{ZT0)3{XHTDp|w8} zp|Nj$2`Mw2S~o{f_z7P(bKjX1WATyMs<_F;DQFoB_2!7kf4i1oz3|eloQCIx(Kj~k zgipMID60c|n@O2ru86s#X(RP9`Q!J){lXd}_`J8dU1K+ z?#jeWSW`xff(a_1t*8{Tk{008n5oXUL{{+!VBX#aeqyUfsx2{b^u){<)CFV_T zqdkTe+4ocv^<5ZK+WFq4X0FHo%_wKp&JKKL=wK1F!@6EBaCt*OMM#9dpOU!?&EP?% zve!UHBJFj&NtTtC_}CD4$4@2i@))`^BqZI0!aHK#wWp|43HoelnwKY+&u+>>!wva) z%q4Az)aRu}hF*}|nZ90&+z53-5L(47FsxPEVtYc^PJI}nhmb+8`nOc%4<cQ0Y!sZ#PO^jWjI;@6;4Q9QQj(9f9rz`Ml6aUqp=u={=q;?=*y~ z^VmIxddD~AJ_eQ4yU!|la&5CFAH{M+jL$43JKvgK(j-3E0#n$*o~IwqAvORFu>sN6 z68oD&XqgWAugjKa=-zHuc~)%NCHu%Q@e6ZUXx>kj<54fr5;HM2Vju+%ogtUzGjs^fHmhU9tNItkCCyn!Hwliq%joQ{o(6P?AJ>Zksth}uXgtn(H5>Rx{k z!U&N9aG^%-eybv~29HEsxO<>OFgGV{!^EL=)zhx;u9h2DzCu%1DfZaj34+1tgAJIQ zqc8?cStiqky##XToosKiE$1)3Ci`j(t5lHJ-)IrZ3+w)9CMJ>9PA1K>OxiPXD8R##Ai!mt zVeJ5XDF)O~UmL{_0L$-f%|%8)irv*^-V+Q$1$4LgZF4CGeqlrDDAQssXv=4`lg;Ug z5+XU&niCNMh}`?VUP=UUa6%bro|do-0z#slZf(cp_1S$-F}Oc2fDwo&5TRve1t2mT zzurCJy%%G~li=h-A3s04oh(g@^Lslt5dvh}e=Z&J+}HL$aN)VM$a5d-&6nmSvhMHc zzgyyk1@d&!NWm5kD$96Gz9;uP3{4puMd*d@=YQ}}Z!9#(gfn%eE+Ek^631Q3GYxpf z6c+m+b67rG`G)H8|Cv@$l4Hgv#D`cqHbp6zapr65y>0l;ws2wQ$PvLW2p2#lS&=y8 zp=MEH!UxAERB;R)LI~}J_dcL18vhr#d-o8yyB9iNWAhg)3$;n zF|&Nt-=d-?i~xww{(67`$Y99>0CB1EZ#7|s-U#XGK5^>3^Z$&)7`~n$NGV{upc~>( z8~IT)u82L?miRQGN}Bpe)#;f7qkaVaF$eum8Y$sap;mzWb}j zLCl`VQb=;}eSeu6q)3^flds+)uW(1G#At*^1BfgO#K(~kzV|DMNCQ$1-`f-}Sl`T3 zo^?d@uffJT(d_S)JKPimW=P4QRTXxJG9C-?Dq-B#op|1@#ErRq?eR?h*Ajx}LKrue ziT9_R_GR2d9A+@YZUmYj)@%*bah6*SFu;3XuXhVHRg}ly5P5|aBVvTygdPGJx5gh*q@Er+@J+G}Jn)VoKa@Y<-bO%eXN(6)P%ATjM*ZF?w%4SP z-O#(GyPif?BZe>YW=xW}+gz!hgNzMhn5{X}6(8%s^Bh4-fKQC@pD2UfboYKMY-Rpy zS*_KmD@=xgQzZ@oP$H5CceDsw_1bZt5QXRR(JtKYrcf3NzsQy#m8Z4dZFhlG$D!b5 zz<+E|_*t-zMc4&`@cY^D`+VyHW_vXhAxeBWsF93TqigW|K{O=Cl?2$_HK@u0^2ziIy+!+q0JyYi8#! zpNlld0)8RpT&>i+`}TS0!^|yUfd4YQgqF$R95q}#ongp_501U&CQGa6{!tSyz*%k4 zckY9M#f6^SCFACsXimyAkxdsqojsS~L&>cDAz*fy4gs$0ZBl_2QZkl0!)sqA1Vxb< zM?1i|ap8+#f%sjgoDxXJJebdGe)s^#;JzP)1_GdI6c!oG#yupa9@q zh?dS|$+ z+<1$_W(kQ2I8dmo{443U6+933LulmPC2w!9mVGlI4V z?PSvk^}^eBQ2dbhVwR6HKA|xoi*uluuwobs_q^<9c3uT}uk}HCnk~qMj2$4ya}uP{ z=jzp9R_32#IQ^rZq7XgzM(reGu8g1h5lNjy30?1 zEar79mWqpq@1CQ5tT(acPJ(QV_DR)j<3cy%snqjfW=h4Xi>Dy!VZKb6_8+q|tQIZP zEGf0Tg@5ky^Vb?&(u;Gb%1dU`x40)tB*#46`bQjH6OpeVl<@ep?OYpw!eneSY(8jo zndRkZs-w*|_Ip+0SIDA%#-0Lq99D})YI*hG;J@u=T?>gc{Q;X!xU_MK+u<-nW ze1M5(P2aC)M!`e@L|H>q%0eYJH5>y&83U4m%qCI{W>QRM&noRsovI8P`yIE#nZ7+% zz(g0xX7aIwbxDF=H6yy@nsZ7xponNKFlmt05#TYZUzOA*Ukgdv_{v#MsCAn@^a4!U z<`xnkyd5UF^|Wyi?R|L6bRcL*$HB3!E-sI?J8#~!oy@!~@9nJ39|J~dX^obTRc)e@ z{w4vt-|WiHiuZb~Ufjn&@B78NNPB*Hu3kRN_PogV=K&Ohq7^BP#!2puZXY#i`bLE|7ftDAuoK94&G8X;sx)n}Uhe0_4qrs8pGAalqYQw%*p8*N1pg}J)O33BGhie`-QvLV|KhVI5y;4`(YzHR;oCA)W-$8 zsB8_hI&J>qm3_I3WUk zNMd@GvJaDq!yl?&)KH)^!OT+#(YM{;_Gs07-Z|F>yqC@PuDrM}v<5^J4@{{AxH^}Z zsQp2=+lWC|=HmN=Sp^uAV@izmi#MJf(-^XZ$duE#`%1kH-=6O8|3Mj25oJ_f-;*w2 z5?Qn}UQDy`tN!ts5hQ;e>Ys* zo`_o6BNl(7gA6P_?N%!l1*h5fb||mqBDKaFK_j?_ZUhZ8>li7w^({}nZb=S{Zr#_H znJ+u6yUI_~ax`kcszJ`fSbxH>$7vokW1i|AcvYXS4L-(kIk- z(lk3Ngg)^};QrTRc}QL8s2vnD=TtK5P03xwzIro7@FVl2YFo`8;s;ejN3xICFst9| zbM40lFbH-E3L0mKpCL+`cq~ON_jCE&Li;PJyg8i|$LA0hr@Dv8k>`D-JJdQ#U4_B7 z1Y)&SIy)w}IW5|nUNPo5U8zPiP6d~bZPNVv?gT_wvBw>k&RA{wCd1jUE=6C1jRJ+M z@`8Bs;R;(S7-|b#xGJ($(Asgs|5+ZxK$a9Xn>93-3=!|TZ^MNs<@W~?d3U&Tdsg!$ zJ0y2bCTUn9U>wSe==0_LH{y48k_*==pJ-cFQ>dLL5{peP{2iq~g<1`MLZuCI+_qST zjXx_ftDrlcA;F+^)74d?=15OngiYg*=UWLCSq|wx%8tBk=Xl(gm}(h@c<;}RNtUx8~@>;W~-?&^%~6A>22o? zl*4wTgXGdi@Ij-aKx**Y`zOc-B)1x$FUg6B-q9=8GCrwF60S;vGdvgv9 z>Gx8-iHAlEyURFH&zai~zIEbxu_XjC%Vqr&hf@wpp|N093k(s6`3C%u5DfETQqK=I zF)z3Yo%eB<(0N4JV$cP5xs=N`^>fPc-GDbu)46(er^dVsW@98}WC`{qXILD9J0G5~ zc&N-!qa*clJFf&83BOVo`<{7^gZreA?X1upwD&PMiTJsGL68L}$m;uv|XMdq8b_y_2Cezv5K2Z~ThkG-&m-+wn=#%=J_gh+8fU7N5E{_N%4~$rLUE9vcOzl&G{N#caG|JQ>D2!=Ry~1owR@z4u==;NMekYlaMHY;=WYcBwPCzFqCx)+`&p*I0qI(D^3X^vjk#ccbqyK30cv$cV}#xiWR$acs1$*m?$#0xSZ}j)}vi5m@kMn-NpG3bsVf zFq=!3f|f4|S;2+&^h_uYvZ5_Q=xo+kBK!VxO$XWB#n+*ES^wzvunc9;K)q^opHK~#&K0$?!EpQFFFt*z0DfCa=UQpOkB#UQd2kQK?XF%%%3r>e^#`4dPhYUZh;26V93*O z1d{+aMXmI6sJipxGlU79k$CBl!t8C_>IIHfS(BtI``f;6%l3?lBa{w0B}+xLen+Vu z=fKGOWa7thi|RJdMeNl~U_iA~+30(G<)S-R*BbYq+UlPbqGPqB8l?TD85mJ@nKH8B zPo84@dE0kby-(2f_2-@yDd&^gCx?jMv6}hM_OyLe7zkDF4plF@nH4~<(f^Mg90n_o zabNw-Eb!^YPv4V=b&0*7I+%(r)X_ecq8T{{A^wDqSst^_N87Fjze=*Vand(#{u6W^ zZfyp&kgTcL^WOpi>fr-L58i$(#X%xWd(#19QGA)GYr;F~nN@3Ny7L4z{CRBs+9e`P zqizcCpYxxs058P}hWcRW7V*(wWbeV>_oQEG4$bBoXP6<3bnCmj9Y=cM_fNuNq;XbY628#vU=p78V+H@-(f`~2LsmVCbd%N% z!%sSSylT9VYROei!p|q%o~CAm@8-NVWMZmT?!l1TLC7sXiBZ>^kI{ z#rYPAzjHtc9NfW!9ywax_gdQ$bJknTCS1b&LWFE+T{w*Bg z+|qzA>mKeU0OhrW_$iZj(?fkv9rpeRhuY@POG12PCp})HI(~ML^%vYtO?4rn+ z5+*+WHg$(i60kHRBBl-|EhT1q^rbL-^ii8)31RykA= zC&Ee5S7s+!Kdy#45$4VXeKUUND&^ddZHcZA`-Yh1-^-u*&)N^zi4qAwN(KcWOS)>XAC7-`|XeX$lI_BlJOB)kBR+Heu8Bo$XGq^SCO`!OxI*~>rQnCE_M;bnAkUV=Lq(Y z@b`)U?@89*#puJuqNU>@rIqs<91?OS9M6%`0tZaK{J}|qAl#CNrTZ6UTFvGpoo<{Q z>SBW~5Z(Jn_e@qSv)*YJ-KuT*k4hb^bYm`)!wRIS4bE@6mVH4z&}LMh35%qT zZaZ0IB#$tlIt-Gj#_CkAf3lRXVW!*d_V00Wu6;RP)*WjK|3(fK`cwVVVaS96hBc8c=(k!ZpZ_eOr z>_x02@u_Bims;UjU*YS<1Cej7N&u7jaE&+ha2xq(!3Y*I*DF1<%gM0lyGyojG^7lL zOr@B#fJNdzMgtQalm_E!CxU7DE0RChM{O{#Yu5A6(u&hKQ_vQ3=;^J0sC#ubDW^J{ zu~~D~;(0MBe>9#TI!Hqf1ml1j5h?mqWxxWDJ1|QfCYpLkN5bCOW_CJo$Ic5zvUA!p zpq{Y>4&(orihp`b-t4f0swlbc!w3gy4Q1x@+5@XLw1k4=STg!T04KCEwoX+_-g zr+d7o-qx@+;k+!j5h+=udov%BTYaz6i0$_|ySp#LK{u^`O8T)s$)5nh_0|gHuTZ*R zR!DpNcN+uo1_?k=^3d2keD6Eo!gunma+vL7gJ3f?;a_L6@NpF)%9RqTVZ3=Q6U zTkThAWcZY_6wiZwM26qNSkOl5BL3!3o-Z;JCJQgVzv-oK^5t?@Q8mnR-p8B~BMrO& zMy>&uh)G?uE-0N2Cf6H&na>92uGOmzjNFCdwU>7M>jU+5cb5b~*6L=hNsjZ#I0|Oe zL>Amf>zwIn!hC6-`Z@Z)!c;J%d}qCLqq!P4-QNDF6|9E@WJ&c!J!1VnVZm&O{M3() z8^C&Tu0-fKDGi;Ehjc9N;fqudd){TkmO>xLP8rEg-DZzvPkZ*g?&sJxjl6O`L89SCI3NP4)Fp$upytJrU;+q%3fB{Pe;?~0P^syy+YV;Y9BU}5 zu3DZH8f+?&@@l4gln!kM7c7Zh0Ex*g1@8S5Qx({Io+Th#Nbrljnq z?93{8?9SnidBKiF%<^qhL`^BA*m)_?euZ05O_eSKRmgbygC0BaAehJQ`6pe5-*wkQ zxTnTO)^*;lny9=}0LRV5)MNVw|Go}oV6tMq%lSIwz??2#wOF%#$J!X$Rk6wAIOq=H zKV+)wr5H&3(4oV1PhSf?(quVYtbE(qSp zyC5z6mv9n#hq|W;{~F?@h9S%#W@^sfhrR$-CCc5hp@tu$3=sMf^m=J~*YzS&9()my z)gLqWa1a-vQ%h7Jt2MA2NPJV|%D3NkBEJ zI6esK!BcowemC48DU^oI4{u81clbual=5leIs+x3jO_C-H+KKT7)HlB*3JLg`nTll zy|cmy_xOb+cgSPR)2w!?RqEI|d#%(vM)Z;GCqaiRI{j!j`>4V=Q#tcptbg*#7aG5Q z(7PGC?yV|I`csAfz43R4imMW)y%#52sl5cQz?j)l*CvMrz)N{Q!s>Z=;@DdFylTNl zUMq2vpI`Ip`k50O+o`O(CpJirUOcB8y({EMR^=;Pux`hj{={+~W&eozsf$kjvW>7# z`uqNl*&R*GY>z&Ur`HAtvhk{#%kOiR;RA`uJN{F@%7^+1e&$mywXW;bbwrg@kg`;L z|JY`twqGEuzOA&eVb0yP(RFri;&DWj+0Y}7?v1W59&;6-w?P-l&fIy{t5K%avpNgv5qZIDOArLsJWC4lS=K28me)&wA8Q zlr=uBrjZN`x|{w>11?0HcWiqYC0yy+w4!uP#e``2lRrVn=I0?hceL~;4f)?3H}VPR zh>zMI|GeE&)wHYsZ0qj$mPc?G_50lm)4`YR-KK^OF^bJ_3@XYeer~SMeCha*2Tx{^v$LX(hmMBJ+krCW$HE*xB~l;w0Zzk0=;HM&PmMOCI1Wew5dWbbhq*oVE+` z>3kP6?yoaDO{SShnKg6I%NGtst#fJH3^eyyMzOgDHYmxaP)N(Q*+Fu=Pt z&OZyKe(Nvi@BcKX`qWPVg2--6E51(H`f8(}0EQg=N7C6wrUq_oECt@-;xoJbf>Bau zaGk$P8Zz*fL?g)!m;A7l541PdDwbzT(!E?{a^L`F>;{vcM6BBlpeh`s2iF~)$)1v& zeC&a9^xWjuV-;=eeR(w2xar!IZ9+Kb+Skc3jDEeuz5ss^E|rk_Ae+CUlq(x)Dl+t9;`?cn#t z>c2R?<}lY8w2x7Ptk5h!9lC&0LcK+OYI)7?DfiD{|8s9CgV9e&XQ{ZVAT7A%Ik{P7 z3jZL)h;v6$%FnHIqr|8#`o5^dYjs?o>-G7+BnoJxAD+pEc{S~;b}h~0q}#_R5Ncv% zdB`6#IFq)u7G=KzuA6j8n@??L5V6Qzt2eTLJj`G0P`hQ}`^G6dp+17(cuoO4$tNZ%&@qrR1}?+SB`onUk#cu`iAi3z z#nV-4l5pLq|I5>A9`-B?fTEX}OxT}31;kkjw*weysI_++9E|Tg)XCHLfFwvkQbywB ziK28z%*{BtR%Nwg3kd9Gp`gNdr0mV^ z)-VRWR(v?$&J3!3KD&3@Ya5`%ZlB~_e{W*NEV|CGEwhc)1dBWD7dVXQ^VgiJ9;&uu z3_aG^b8M?P7$s&v_?g2-xB-ciT5JktWU4qRt+GR6w$kHf@7hIxqFXg z7AfpNudjdRdXJLDx5Pq#8>#1n{5OA@Bec8*(*r2`*Q?n1>}2Y+Y#GGB#e8YK-~Ji* zkL5M&l%xcf;}1iTO` zBu0?%70IlXiUCXk>0GjMy1_pv0I(yxz}Wtx?{bz^rMlAx9TtVuPd5hwjMvOx~kXEz?B%WC&S)?|oCsOS!g7IWfH6Gby&-NWTsDZcy8&x0YTNUYx?S{*5n(R6_y_!Tfr-TGYy=S^8bH*4=&X zm)AwHCGBWGx`pug4OD8n4S;Ipax|DHsTQiIHRsxV{)B<8zX`zuF;j@a1d&3#RW?Tr z#<2ePjDNL=&|3)z;eN>oIM1J<*U8M{+tO5{cymjaYH*b z8gSv0&=uX|Pe}>UF<(_#c7AZoxsfazyIDtdkx*8g?|}%$qBmZg`TV{l0xOG9(!QEwJau6g;=>5AP@#i(9LU4-@3f~mOS+87-Zu6;@7Zt;2(4q$iJPK=pRW(3y?(TGuX%HO%F3j_ z`PAxZ@{=_5Mok?W=+&3E{jZrG8kPZc-oSGGhs$lmedhKJoklI|Up&l%6^@y96(}n+ z?4D;e+BBW|naoVyCYSr|dsbz;n54=>n-b%!%4HaOQky3&)83$pI4l_M+5D?v?wecD zn=W^63{w`)eYAGM16 znyn*t``#ay>v{6$Mz7ktYdhwJmOGE@U4eo!t)yg<40ml%i`jxjgD$K+JqQiMBG6?n zY|=W+=USv(KfR$-j;)p4`xr+2bdl<{_Bm4Jc}nGhnm=gcy;6CI5~WSkar$l*%KScd z`K^#P4=Z4A<;GmmTrMG}z5RQSj-crrV|1OR=_)fQt#4c@Hp*BE4pz`=vZ(!O zE&6eWrQt2iuzejpKV^Nh3s<2lS>Pn>YFz#bo&*<9IV0?M831s5)&7AWYHl( zx=1lkqBS{i(9(a+aSFvFR-ov4=g))T|GBeZeeos#s1)VTqvFJpao>#()wnZt{`%rL zS0a$95?YGvttgX$?H2i;z0tyKk^iEKQP{-u?Ss+Zua0%$|~s^wCm9` z@LzgQmj3o6b&ZnL+7!2kUeuEO-jy7R?n0RgsAD5q)RIl-Rbk>^q=j(WX64H zHyX{iH5$uc%_rbemN>sv+OpR4qP*VIx4)dRCnU2qcK$U6OYx(Hde`oV%4xEyz=ZO0 z#=h@Zt6G&oVv@xzv|@Ia|7cVL@*0i_=c7`lrGtBcgRgs}3Au0NS~F=Y-eKhC?uFjW zA90bUdMlZ!FG=z#-3Vm={u?h`Dm{@*?aA0x#j(F&Wy@(V(t-->DG+BO8M^A*R ze%;I8ar{o)68W=JOktbe$Nx56#uqadN0hRzq1ud- z&#_vkTpF;xpF1_tv~!6?aJ?Xa&Hg`cFfRb$yjlUx}TH$qZ~9XE(j) zg6GJU(W_37a9D?Uz5G{t8PG9bnBD$okL23N6YDnXC`bwVleq$G1vUaHJih5)l zk}zG}Zq~iMURo*9GS}^+^p^V%MjCl8-ziF2(|c^Uzg4y0^FiL!jP)vO{3PAlL!Ui@ z@cm|;EQW{3a-<)BR}V`vRi29A`Q5LlgSAvxW_t64`_(r&X^#AQTWRaS+zm@!dtn=m-EPN5JZ+Evu*$wjLZp}I|nE?c)bS+&P;!yTF|fA z!g^!J!S^*eN6g!v5~!rUX`5$CckY^9PU~|~#i;u9iOM`U;UEz^*w?|2$cYF1|K9-j zZVAWBl!!GcM&`A*WXRofy8B5(K0#Uw#lAY28h32;?Fou*Et`p0tt}t$ANl4zO%WeP z4@=Q|=fQXO8=LHmRf4PHH#*_d6epsL!JFx$i>fW-5QWXWthM~D=pI6+R2Ro}4C=q+ zwlnty@F~ubEoZF$`n<86rGM9RbZ+N|b7LFagGF*X{pqE#xgoSusSU0`Rr0T`1Q`cEPFu4M7vEuaAx=^ubu7xt*l|z8~)z=j}rA|vr zy^1KugobeHlYM$Q%jvfzM`sO=#r+g|&)ygDL^-wR_GhUl20}|_Ed>$^4ln$F5BMF3 zT}3^*C~l~uQ>P-p<1e52X4A;f9n2!Kbxd=Ung;RCkN!a_LI<7boo7Pe z#vY-#lZE4{PJ)s@d^ew#2IL%1z@U#-%Va^bz6uVgO+QhdNj_dY=PXe+hCkKH(k0-*cBrZQ4l@d8 z5}Z-3dm_S^ZFv!_&%~-w=36WOeLSLj4f;UD$2VCmH|3o+v6E611(enVqj*tPeF*I1 z9&P~rJT-ojTVObk+E9m2$1(h%aVGWK+2g5~=;*$tn}IzyJd1;$!~!GWexk1fWddEj zGMHOdrVS0p@6P--HA1OG-`F0=GYFptR2OE>-scUw?-+kW=F2L2(Y++`#Xm(h2Q-`0 z^J|)#9~a#p7X#1|7LS+1TXL)IoSG^x`B`+%6PP4bo z(HZ|5C>`UwRoLSV9;AWFa&AVZo2+{L98=7#BOW$&3OA*rcC@d!{nYM>QeF$JD=bPc zvco}L(kfmLxKQ*(T;Eo+O~2{)$~@f9{zpy9*8+tybqbA+!x3bh22)q_2^Wz!_w!gU zUg8pGHiYH7%32V zZoNF%6DR2yTj+O=|3G%niBht;dJ@eZ`dpK8T%xVOurG)b@uHzu+mnXxhj9{!EP*4p zYvn;@XylvG_5~DsT#Rv%u=N#aN0IKETiC=l+(a3gjT;+@TT8ow#*IOa%N9BAR$Je$ zqS)~dH1mju5D|P)*csUSJZ(!vs(y+gA&;2M)_or%M4*MDUsNPnFytJ`bS7Rb%)X!d zgQFv-V~1L$bj$0|V=pKQZExJzRz?3pwTjYk`yv~q{P}Ry4f6vS)t(fPE}}NGEJ7gm;xW61c_pwW|WlgC($rj>+v_B&pz9Zcwq@$mRCk z5+&cR!=;DfDMs#m+7vUI&MZoD3s~C{lz#J-u3tw}L8tDZ?+kf54pGmY)#IU^&YzHp zi$Mysm=q^wIJvbk$?@=ap}imCOtz`T8TO>z^W@k5ToG-Js~z0~VP3sySkHTM%q&X3 z9NL#bCXb~NF&3;>yQ?0`2C@;98&6I#l3!imuwWPF|E)0YDwy!nbFWXq_lE3!uMP52 z%oqGo_ipdm1UmE!g4#3;+1hQ4-C8}uE&(DcV_JI!r#*M%o{ojL_VBWrMFa8bD)BVO zXnXNketG#oN25FprR|^ZU>4#F1OKO|s#rmPIl)gIb>E_c)VXRs*hN(El2p36xij&R zgAJ^3L!a)ePG@;2jUP;8^m=V7iqVaPv(Jqzng-5jFJ7p4j=aR@f4}4hI{j(S`ge7= z8+9ki+}`xKad!V8KTf`Hyf9bJsIJtRcD>EMiMn-5KjjXvr&@SDKzi?ckC(+oqwlTWc_)RU*Kp1mG*ZiIx|G?xf~zVB7s4uD)y9k#TgVvC z@?RZdeJ&%uq|tNz5Sgt=n_;w#y|k7kqHev!QvBy%^vRF)zW{8XtoNLw&R){y?|_D+s0sJ(y0#K@5!qQ zhqx%&(;h5K_VqeC;c-f=hBMcvTt+HF6~)wkZ`%jjj!P;4$f0uO+b#%5RT4#X9VPI>JS`1o*r(8V(Wf7}Z`L zR&jA*NKobLv_r5-0{_TXeW!%4hJWWFNCS;lccWT$wsVCxGMIVD53NIt=Wu&NB*4o( ze;0n90la`Gc!9{V^}FcR9*bJh#K@ivVF~ne!myeaGes5TJ;VfzY>IhH)8aCk{jaIxa+>K|2_c9Xv`}zyU``-Kk2z` z7u+vh!uQ`Izk{2&^Zh&~;niUGqO)hl?}wRudj)g)w!{UqX?{iPpIXqs2nGpch(&W8 z3kV9T1CGGa;p&(EZ{AQw@H(V5R-X8oIaN6B#4k&_&~3Xn%~5e5U4t|AZ3^eO$*N5F z8u9ZN&)3y76k&XN|7aXFqA4x%86B;HlxG9pO(AZfM@kkczgmj?j)v1ie2*jhmyh-1 zLb<0eDFYdg{gQo~c}K$iZ&kBR@<-4x@Qz>LxYM4Z7PDFc?2B0@9n2U$AJ%Nu_LAk< z4RY2~@ZmxZX!U(nCBdreNzuPyoP+Nxl9!_bd;6kp-_g{WJ^6!N`6es;Yz4K4MV)W$ z+g6H&M8;=`Hd^4s#x?~CIm#zOwqwCHtt2Kb?K3(t!D}jT>C!FroCoh+(+bB)aF2~B zG2p+yV{JA0?R}PcACFm%m6er$MC02#E8)&?X6V(@0t)8ozBplFVdh|(m_@Mf*zKgJ=(DT@XA=sLu1)#l{9s+c4yg@01Ofmc;uXA{RnS9ce( z5|Z(4Z89)0s0G%N@bM&ORt^qjpk&ei`i82_U~yx0cvz2~kue}St?2gV`aBEV$Ghef zW@rw-soPj4hhDvlcI3$JZaK-TfFnnQjvy2LGZEM!MAc`XvEcdJG&#`}h&OrTcvD_E zN=TFBPARS)5RQ3CoDeNgR|u0xRu*%}<1wa4t(a-RBmyIXl-kR^fe9vn&i{z$a)JbJ4QMhybMsP*= zd49>p_J~I zSLc!YA*nNgRm-34zBD!Yg$oS8Rck$5h1&%rtbS=S|Bt!14$89q+D0W*T2hcsNl`>v zLR3<^yF^qPq@-aj)wJ{eAEIetXaC+20>~<{oFB$IC0u zbDb-Wb*!~uusY+-K)h&}Q2U)Zoff*ja9H%EX;=eOO-(9n`p=(QAv73Z8U<4naxcQ< zWL7GEtMz-1dEf&bc`RN^5m#F$SK(64{1x_Sw4a$YBz4o|A~K%a|B|m#2Yy0+@=@IP z^0b)6LKgx(0%}3zuaKX_SHfo-3$8k)rcZAr6wuRTP2#5d+A}l5uyTdQXk}`ZoL#QWaw{Uh0)w2l6J9mc~Csrf752F#5X5KviNo9uL3c>3|DAos9IT5~PY+x8D9O*MX;syRo%j*pP= z-@WuLf|fmVbXUJ+Wo%AAIoGvr-_K(3E6jw)h9Cx0rm+DN=CLP=>#?GFlfdHyFq#;j zw{8#rv3N=p;$(Y{T995LnLi#H#r|M+LK?QtL$}y6xh-dZ$>Mj|dP7PX!_R{n%$hcP z9A()eCe7`mc@GOhQ)K(ScLek*lu$1@ZH`X$N0&uSoR)!I?Im!7EZ*Nd$Fl1XnwNGW zjUYd@O4IG(KR%Z_@>Y4RJcrpHw{r%{Y{O@Mrxl%s`Pp`i5*dOi_ma(6eD~muJ9x0q ze_}4^I1+c1W|Jy}{^l$+BF=Hpz>D(9`xK#4B-sbywRN`J!T)&CEal}jk${z0WRp)3 zKRX%;+U&?NSFQhS_W4!TwxKTCuz~Z_|Gb#~#r2J#CwC7Pom49%`P@oly>}iuR+Vze z!0Hpjs|2-Pb0B1%=10S6v;XRM``XhV#ee$7$wEFkE*t5?EF>YA5Wy1_DfUZ7*hSGb zw2D)!;60u9Awv)Wjp`6IB2>g7=g#}y!sF)9A;mI$c^_J)<$O?mDR;r9Xsql(*0?EE zYH^B0S#JzWs8HwkLumW{Fnt307rpf|?lTX=_P!l@p6q9LyxrlRy|1Osa#EP(2Q+t) zLfvw?m8~%0?!?P?cEYk{B}nP`tFXP9>p ziS$5Hh2~wHXLgxL|3G)CoJp8K-yO{)oqg*mvFfWk7pkVjM5ymj+{NtXjg+*f7!RXn zL=Fsdg2cO>@CDsvjRzwY$?Ewk0>z1?*Z-a-nEB4-yCCl!C&YVEJ|vFae0+WlqauBo z0(HAEvm^rmb!ou!ijO@Y~*FJv`UZ$M8_LrE`~ zF61cn`IUmDQztTY1^VwL0woCRmX&a zSwKausUGk7lkHhC{T7m>Gln~vuZAKL*&N-t+VHNclBjOrV^HLQ+|_$slo1qy_x_!A zLKeH79@%e0zw!C1d)Ek}iK7=JA7xXYhh=_22+PdI+vFAH@I0EKnI1C zrQ9QufI7#GA&uu8Uw6i&FUqWXoPPP|V`Tm&1;bxcNAIglf2DFwZ?o@H%jOqeg>03% zAxRS%Y79g_AUd%zwxuBY6!u=6L{acXv49u}vt3TotjCoeM{sh&6X=yCzg!D~?v08% z)!(lL`xYq%BtHnQw41<7Y(=`*H#%qrRXHwo$zQI^&T=?9bbByb_Ex{%L+53a!qrwq z%r6+eU#N{oL@au#nXvs>^4y%jb9f>V#jTn5ta_(80Jn?S6`E}Z%3H>@dG){m-YkMF0r5q}( zMzWzJF}gij!bnln8Qh;nXR$s(BeCdeCmMH0E9iiBCtb|IC^N#?*K(c72M&n_y$92a z`k&wWbA=?XGi2<+*!EcPb2H!I5GEUWZfm?%e;Wf$?`}dn6LydUvfxhPyTHyYhmOdU zsjXQg$J?-cmpJJ*VhLtsj7Q@>Z7w}u*t|c|wW&FTHWTb)cJc2{PvHxXFwMxR_GZ ztYAac{j30?x(lNo(dkb$ z4l%5CRna-qBInxZgC_SVaP0orDO^KsZe&!tdDCg$UnZQlnEEx@2Axc7?_%}d+x-k9 zNK~7vby5P}5ZF(syit1{L2TvZ6?*;>CSzWH@;$5?yCHlP!yxF5%4vDuJ+JtjJ@l)@ zzd^NH{%5!0kB$z5nH4fO2WD$fI%fXh)~m>~8n0B5&W`}kK-KZ}rKc+AZ91AJ_+K2o zn|$0&cxb=&Y>jnnGpvJXeTny+-nr?^ncCU6wl7>5Vi&O>4Ox*eAx4y~KdsqrtL&r(**~O zz8D;rbV}}a$q7I^JE;qv=%IR*%YzxPLW&djvTU$A?MU)57Aeq6tgq<#-INJ5xBdNV z+dA~^9$3 z*cKb9NIKPG49d$BWVljfp(o@tTX}u|r~GDfx6AIDqiTxIzvW^ke1y69;Yc!p`42$+`6YG z6I4V*M1_K_+LiNCK5dtGyaRD9EYKJ^^&d}FIZBgps$}{TcYGL15q6RT8~%D+o~aI+ z_HT~aHYA0%igX-(B#bes3Y(T;6LVY)%h6lwyMF&8z4>k0LP<4r|GT`G^%Cb^WnSk) zB&lys(!OUzt;Biz@1FV=l`!-I7@(6M{TD0= z=0NR~@!9s(EcPu)Ygg)Xpe=ofKwBDF)PVm>jB3YU8IOS^GlQUrn_sW& z`y;JL5$ENo_wQx0Dw*UBN!MQ~hy7ys^}Wv5qWt5G~VK zRZIrR;1&;3E2fKG8ZOm3>z*(w+_!VzSqs1W8=P)zu0$uF&~LBP9?MC`yFbvTU?nXo z{0Y&x29LC4aXwiYZ^qO(EmvfrepP0SR2T(=sQb<}i>+}2L?Etnla5`Wi#`4{$tVe{ zWsRo~r?kWK%##aLw~JYC+Qm_$&K4>xs3h4l8&FL;8W9XIS7leCAi8w=bk)I)ZM)r<8?0JWQ}O7;#&gcW?1FMrVe>B1 z2QWiP<@C65i-EIb6BlVBr#aB-GgkRblXkhA!LE4dDFkx|JcNj0>{ z_7l4*$U5C;K^tJY~5UJJQDKKKC|m3zfeEmxWe*4B z$W@x{<2f|KDobOb5iR!4?*X*)J&Sb_Jko?^PK^}Mfw>@yAhH0fZBg_U9ZcRak6$U0 zR6Dc!6xwg*lKv#~yl1D_nBI3Cr>%D>u0U+)ICRzSHcQnGzfA6=R?HX|JDORdOxELRV6Ts}s&RmY%jpOqXNi zsa&hzvI0yk|Jgu%Drl`7s{gjvho-mMofo~G&bySrQM85Szg%R&!_jnT2E#5x%k9@!S@QivDf0)nN4^3 z*y(6Y$#k$4l|nnO?-LJ&P7Bu=KH}+aq#>f>>3OIPZuawWLwkZtZo;!b&3qEq`Lm$a zZ}~5jIeuiG7C>LLh*-t^C&l{RB` zoKL*&JIlYW=4J^5m~I_a6*;$wbw-NrJkvN>3>FVC)!Y39imt8%lYh7GQ`HJ>E4W6) zrS#&atytf{*4yvWc3U-fDHkJlyXI6>^s|?M;b^ zd)qHF=@F1Zz|lz~Z>QTF^Au;O;OI7)<(@~We+X|nY6t0Mj`x%(JNCT|f)1d5%rZV} z*RdH*5hwK=@fC5$N9Bi;pP5~meLu1SBw-Wrn;q~H#>(DRX*Og`tpg8mpso4tS6T+P zt->5OtJ7$VEaUQ)mb1`PSRMk*ysl?Vt2k|d5hnmB@Lpe|b_@EYzlY0#w^9bY0;3u;8^fbJh)-w})p#`|#UUzxat=XL%5x9EH`CE3^|d92h^l z_S+)zhUx9&bM1Tk_*^$Wr`iepE7*;Wn0vi*Cw9#Q*=Oz#YnK%Kq=~US(Mdz*76r9$ z#a7ZD{OK{spg35l2WN2;W=2vzk???sL`pwB573B*gL!YWVxr8g9z80HA6~YKG}Ug( zyo^;?|0@TX(vdoLumroUu^L}qgumMj(fK>AFEUS9mSH%j`uLDL=~S(o+Dekg>bTH= z*bZ@E?i$QZx$wKx#k3pT)2Rd)zoShSfQ5gxL=fJOr=X_2=IlDKW?A z(S1ErX*(sxV)vuESZfoMR+EpqrfgPl;LX!bG}XS8F-| zB0!KA^+{-R&2G15b$PtnNghH$rvBGT0wtc8NV8_3A3o<`HOW_!J(1C^T?iIc8@xPk zHU`x^(7tc+;kkyE2{q3VH06G~_vD-y8w5wBSnWH_7U`a^xFX8;La#>no6hsuPL9i! z!Cc62^;}JS9XwiQLN3Ib(WX4oM8$1QiYLlGU18_=tbixjNCanJkg??v91L4RuGvg-H;Hn1@{qvl?D+-cS`Xv2*-`03L-e^#=%n5aK2ZD*Q>+r)^IXaY zxKyXPSnaIwns}~W{hVUul*X=hQR5tO>D|suf8$Cg{vZJO-UH@hja4Pwm!yoxIcr$~2}K|a$CAC9Y-hYH)5RYDeDf(W z(eSbCE5%ff3s>)6`D|ALboAN^LJ9*Eg z-4_7lrq@mbJ(|B1&V%?(SeEFEr}4P;{kQWQn@+ThgbX+YN-|29&q|}CNjDz$d`g}B zXE(iZ+I*!1;DAZY-s2=?t*}J~3xNxSqkl~V7g#(oFlefe4Sly0{4@=BYC1qZ6SxgH zV13CegA|`%drKUBYraub05q_QHNcc}xM431&eDo9J^ zRAz~WI2k;%?WqxJ%%l=$W78?gCazXW6@IzbY_OBWYoV@k7$uROT)5!oaf~BSd#^r| zM7$^y`D1x3s>2twJ9b8ivE0D-z}{zHJS9v20(C;o{)waA&-Z9m&f6i`cTz9X=RN){ zdtLh$iTBkK7&4mkg?~zg+SDkLRuq}@D>v+XLTVT&~=E13(5$_d~`#c{Zsr3Zw3X zOQ+9&#y%aq00SlCAcrCRNG1P=Pn#_`A#r05Qtz!{@z5rW5k4|04~vRY4!b(cTMbAG znD-}dw>WJNCQq+l@@p|oWh$w1fMwF5Yz{XR8Q1m2p2>~4?`A}Ghf*8vc zWsJFwjWOVcPZ8+K_Zn-FRDi}hAUMm;oDiONZStD6hxvI17qedR9|8NqnX6etE+o@h z-=OyDj!HsX#rdov)3&2ZxQeK38)kZO>~4o^ERdbD)<|E8fSOAhvms_V(8rFiIQOEW69!e2CN>t1uLp){tE@=GbNNJ;B9<{ z?oeZkmw@@#g*gdt7r$`y z%gzIA`kiw5rpLF>EV(g44)e;ALVJ=vfuI=fhK*?4AU{7b-2b>Oge=ghLW3y*YyAFP z{p0A<`f zcI6j78s&sHW)cJeXqFSK=z2ux`<0T0CjXZD@>ARl@`8xe)7n&7bb^`Jxn9mhj8Yd_ zaR0*l^hL#I>>*YrbU#zgEY5ORe6%c%*F-&Aw&K;Y=&;kvqq%%LO#CMQXnqfR;4mEh0F-<@jtdDxpLXq zZl-yxOh^30VTnjhiD_Gd-g;*};j2|}WL1R!%+U(;Zj<`KZf?V{#h%AWWg zobp-KvpT`ZDY`Cm9LLe7$Q)a~nOn!2#_qWr@^z@A)*K4wtl6?&NEg8+;r_b;Q+m%a zBVp+#nLpN>V`zA)uMrWe)W!)UJG`|47ns{ADggPnjZcs-fc#U_)4Lj|l~z1i>DNh1 z+J9HN7Fp@q#pEm&NXa29ZFI8I8A`^zJ)I8A?{9R*7QVRlV6gW!#w+DzwQ(y{2$F}T z%vRCdgoRoMi7SMtm0_f~7v{GvO~J0r;U<{B~@m;X3NmJ#NCj)qXV?ixUX#(J7IQr*s0M z^@*>4fy#ZLB(fPz1CRSE#qkzO#vVT}L#TtA?Cl`+zBgLuTT8{wXT<1r& z_g$Ms`E2$bII2W(pNfJV7#uJbfr}@jSuLkEplgkXvF2o%jypKOH0x`9b}5;f_J?(BXcU zu@TSI;D6(pKC?isofh*oc3V_obM}sKV?o}?pZZwEN!mOXqCb9V@POUH+i%eWra)sN z0!~bD|Cg9<&X`DH_Hhp{a=Bf)MA3CXRBOaLY9X2Y|9&GO-FKF90`7<&PIYh-&Ungk z#!FCe?eflZI`vXRwlgjpISQ!pP`&tZjxh%Kk;?kY2?U(N=JqAM8CE^5FNw9Q4*EJ;E+}J zUhhyqKVm@63GMoS)I4VBePTt&=3=An^a7N3%eU?w)eLfe6w6 zfj3R-2$*e@=rTMGnq8BR&)4Lu55G>-nh)mTZP>IU#y)3R`s>K+JwNpy_XM!wSi9LC zWJxj49HaDS+eI5ux0Cotbgat0zKW#z2QS)1gDTU5*ooYp!mhjti@d5l#|M|MqTmXe|Egb5aE0HoI-o)rGM~1~Y!{#9qrmO=Co4Vw zDiMS6uhFQKhFjK>!Pj`mBC*NJ{|3~;O#^w3^07sXe~|ULK>$}a=PxrWJTG??o@RF8 zYI=Zu@Aw`4@lU*}d!s_iCn^U^1n~fB*q3}{8w*h?hv>iRFmM89A2*>?&N@D#!>X7` zrPoG*gS4wUe^KGjhydmY4r+OQ`c8m3$3v^Ae}_&QS#YM0R)A_qmm5w9IiF7kVSwt)tP83a4vLOM=jX`8@nij(Ibxp*Iv7oG2 z_dgLkA`K~i$t;<67{H>D^TjKO21o!?H;OTkTirOm_sv=7IeO$ajl9Pm$~vCfAS(8P ziXkaY9KL=qOq!R5`PXP5u;WWoJD zl?z{TQz^F_XJVNxRz1X6z&U$eIRaD~Fz5s)2HnC?i1@ETN3(?ERlL{)SNOP~3$xOX5U%p|KzUE z77<>@(Lu1L^(2_hu&M&5ShP8Gd-H=9oqRRkg@q>@yq;nXk+^MQbZAt^ zuIxJQ@n;@hJ#7VyM3kzPX*`RpD8_p$KaepL{C?~2XS&8V4$su7Z-*z>vAb=NZQfJk z9`IZl)%Z48P|Pt_qM-Hm*KjPiL5Cztx~(wjiEnaoE3-*LcdDhWRnWjf4*U@5NT6rj zIJW5@pQ%N_V~!t8F9t`g)uNdcK6?$3-AA1NbS?^dxVJH!bXDVFdT2Rfmwd8-6DhGl zD(qoQF63cXNXn_BTfIYl{+GZhX(XI-#_VlNiDvMef+B9Sg|`;ZWwP1}_fyay55#2g z1oeiLCt*Xl&;r}ZOZZ^UA|R5-#XAsKt<$28eDl8}|39qC*-YibgPfO}+ZHbQKj=ws zUbzRF0>DqkqPsKH`zF2DWH%*Qj?F%0JDH8knbaXBkPd6|6qcx(OAPfVzIIyKtA=XoL`-52l{mSYlS{m=gkWFWJ6D0Jlf zi*l{PFs79pCuX)rbO-G`bwvxV$9cE{{;2`~_TRbt9kzLi6loS;8*%Kf^QfseDu8^ zKT>`3+*{>+`-9U-DD8Sf(s*RnZS%K4`rFA0tNwvm!#|xbzE;Ug+W+M4mj1b3L-#56 z)gNk73^UeaSB+Td_$^wu=E#oU3;t}n0Zu>@kJ2&=wm%NC!I4k@dE9KIW9f4SBK)1w zbU)RS9`wIV%*bkpnrBxXz1*XYr~)iI^s~Eb(a~!Y6OO+(gz;ZK=q5m$oU2?_VT&+3 zHtSgKCl2lJ6^TYv*kanZ8 zr3Ug^JSn@X%wkW{!Y64ujSkm0~`FzkYsW@jg0Kt$XbB z7N~L|XMfTU3&A1sp`TuJN!wcLm21DFgYI`)1lcoB)SiX^TeK6RBU*uKR>a-j7*To? zZy~p`!|elyfp->0583h4tSO^@-+hfT!bHBEB$J6Bet2t*WL@lyDf@O+kb^Sn8)*oJ zgYGap^2O`tPBbF#S*rn0bij9`{Z8X{LC%RJZywl?>dmHEQsLQQ;Cq{7G9RlQGYvMK4cgRjSJ;~Sw@p!b_O|cdvmEvJ1cr6Gkx(Tp52ch z95~x5Lb&)`RXufjRCeI;AdH)Wf7W2+wv;#B@(+!A5H(Yij%0FWF$_^S60w%7~nnhaco82 z0Rq}^zkjc8jW!WQKu2hg$4|GtxMzk^iV`e#o2@)ag7dIM200%3tEql=41v5tT3*K=w22Wun19b8 z9YGxp!l{Sd!m5~}yuhLU1}0tQ^A^ltBO5eKyuKsSH}k~Zyv2y~^z3?oBTvr0+(25? z&{&<>7+E`<>t-J$)(kpv0^zfQnXWj^UKy!eU5)gYPb}oPEMVKWB0Rf}04nqnSkHE~ zq!V>3RxWB-e!-8LoLD(iLEC6_t61jgciztrrvA5)Xi)Qgpd}jCJ?>neX_>>vKOp}D z^o`GN)mT3@Gva*0;iKhfU`1GNU5S@%e=;#LN+w)3;)YZP|L33BT#+WX$AR#3qY8-p zKe%WST|`fdW?Q?J5-Q|4h+Ap(n~M%aTK@b8MoXY671T0 zCgR)g?Mq2RXi7~8P$tHQf4Y??kep&v*g9tPA~a2 z8>k3Z&D!z!GC?mQz&y)?&)E#uBnjXx8jUblcT+762DjztC(CTgAxGIg15CD@%O4Ea z=C7n*K=$bdvQJF^*{5MvyA8=O50T$$BegTdmmht21Pa!!Mv_Tw%f_;(xf3-Emz77K4`!t$55cgHS|&N(uBDTx&#gZ1Qw`=gQ3y=; z<^({TSD4>nUgq1UtRNy6XNQ$hzL9#*8mrMVSy1LE-F~omuQz36#9VR9X0nzYCI%-! z8V$QWmd{d?T_Y!M)KaLp;d#BoLP*Y8_p3(0oYzrm@e_tNpNa>@$yHEVRrTmwOi7uP zcaI57(=Kt_x)&e0VW#xeYNRA7bbQFI{NccSr1&Z~js#mfZNK1S;S`|hhMPfJCg)+jOAp%wek`}ejMi$e>GGf*2@pyDNG5;C4xi|hu~k5n^nVJD7N1IcGJ@q)B= zBWvq8$?o@!xsGu6&sI+7c312cyf)`$JL5SMzvOBu;gGUTZ)$5t0p!w{>XLTzZ04l{ zftI)s4greV?qc2qJzkm~X%! zP+%l5>U%$h-{5h#(V1=mjWbTILcoqjNdJw%;GTzpU*Fd@7vy()FL#y?=Spellq^Ka zX;{j(ag;N=_gwzUTetH!ERH190+vsaAl=PvQ0pGn0A83`Hli9xW3dS>1r7QX){sa%*8~ z^83j;_W@`b^M?yq=USN+D*=RIkb~z0u`mB1_ox<1U^e%c(y4|n8C0u(AI!IGUE7@p zeq9{oUX=*w#Hb5%&eC}(Gcqxm1Ieg$nyxMsyV#|7PrQ^_zRGc)8R~ryz!Gii1h8yO z#vFZT^&%(oyQm~%NFu~OhJ^>q-(ok1Mi=C+i2?U@Xv_=)(NU++Z_|0*;*g}TIAl>; zb(=eFs{-kTOC@=Uh%YLca_omQh>b7^UU%`IwV9~ythArQqwMvUxztQRaiy`~nWehJ zS~k!PHi0HA^Zm

(ad0t;G+qkgUUPo%NRlnq8bzj;Q>Q_tC!ea{*iK;f|sBQ@MRW z)g2}DTQR_pABIjrZ@93Y5URtzaRI%zxNcU8lItGB`5%_yVYkvGD%ZZS_nLFw`UGl- z>U`DHyk52l(XggNV88NxrcPxptt9o^i*RpKDOz~6K7GSn=nyulwI&;~L?yuf@Qt^H zLV@^~JUF78@0q$JMJG@5V*@?jkB;Cg9cdZ#e7UIb{H{h<|1O6Yp^5W;_)!bX-isSg z@(e<2p=@8H z20BtKbqBH;i#<9lYKvAe3})xMjqYu0<=uUtm5cqx0o=cb!owF(4lgZw?|FqM7SI#2 zl2|5B8PBDqh$5gd;I4vdp(W@J%FJM|6q0kpX!A;(u^=eytFYW9r^Q*N8;7pVvS zu+nbvTLQY@K$VTlmp3Y#y&`L0-wb@}!QOgxsX_(od}UF4JCU^{q111zeBJVr62ZwT zM+-mhH=izRbNncJIKc51I2;i|bvp2}N<3AjHH!U0m za+z+`4JkUS4zq%ec*n~3fn(N=jg`}ZR0pyaoiS}G@zX*(*@Ihni*z3Az6O>i%bmDQ zbICdZ~7E@|E#yD!tWS%Dr6G8Mnn6&UK$_58v>&6lpOg#@-W*Pq|J8~#SAk#%>=jj?cNfr_tY z(Iz6P4$j)Mhqy@y-7rtT>Z)NC5Xjh6bS#XGW`>Ns4W#F2lz?k)))ApG`({L!WVOU( zBo{-UFOp8&5dMzg`pw}dK5sF`v2IFKz-p^6`I7nH3-+%-OXwR;rm&`O)Zq8;hO9g= z_I7{WPW_6p!xQMl$>m0ZW4DI?V-hGHZ!-y>&uEUS_}u1QRIPCf9J613U`4fT-<2xz z&UnoHAx8M4F^^F|y3S5FCD1zqvvRDdbQX{BS>$m140 z_=Zp6gsQ$$WCCc+Vf0~)NWyfPKMM(2fq|z>WO-m;htaJ&Umq_Xla(;fqHSfeL*>IW z-kqxvKKU@}V{Q*GAsQ*a!d>JJf@sp zw^rqkfB<&%TemA8NO==ebtPQlgjiHF@rIzN!+fy0QhbkMuCF;)p24;yJ=c?&oniiA zoELUa!$tj8R-*F?18S_T--h!%9GVoKnqb!zDS-`jZL=0?x+I`4nVkP|&fw8yG&8?}GJ>MjcEg?> zbT60^ck?V$_n=1kyHyQ$$qfi)UTR(bQFlH(V#6QP55lO_*LMOQelAcm%J}h({+XaN z(0lzQZrOK>y{x18qkZXCV5bR4@Hmr1#xo=5VQp|(1$?0zuw zynBv%HlyP6?K-=T6&e3`b^IBEtv`W9o8uW%K2Wkxx!jGXLT*+{-ZNMM6;v_6&l}^s z7avGR^tJ9OLln5bd}+xxdWzUO9LZEZSManZg#~LWpmf`hj)Pv}PJ8*{bhta&_83mv zbg+Aq+SWr_j6T^$EzF*^>-S4_Kd`>M{i7gfANKlvV1wq{-ytqm6H#Be<2lq+zJKv% z_%yVE4(Q1`{VC4buq5g~MVGF#*~KAj6ezMC@;J_O5?aB3ogE7+Za_Si>D%aw$jTXF ze#ohlY}8~6gF4~#a&|TWO>IHNm5}5eG-%enn5>)5D%ki#9 zI3g?m6pmIGA$Dp2Qqx=QT*q1Qf|@IYhdd1r*vu;KxGm-t`2jq6ZpA~TBbj>d@EI-) zPGCih%ls($CFs6mg;f7V;OuY*9#}Zq1;y_F0&`2{t}z#cTeMe!Z>JP~po5Wkh4ybR z0Ni=PBI{k7o%XsAFQ89C)m8lf1oc|&vPc4O%I%T&j9<0FSxb30*P+ju8#7N19(}@$ z8tuOto+7PlOP=z;AFSgHx~EpYaD{c_FPZ}-rp6d@agQRh={ozuOi`S+7mb7u z5h+|OzMW9OMw4|WDK=~i!Xo@nd-^>Sbg2|c&Wdd3$9$Wa5oZd1ST})q6!M8*a&B>j zcs%PW-s`}d$EoKcu>3t^$G|^i84RzzRJJmnk&PmhkzpHb@%f6&vkfp02RqQ~HX+22 zhr%5vqFg4-Heds5zhvPzwN$~=>IdHD)g0gQHzVjH?`5VjHH#tMZrYz+lFL>*5>(BI@^ttw<_yPQ9`D84)HsJVqH@Iht>xWxFX8Iq@I zoPIoGmL)DfTXxCJMKDR^6s~FMvq^}FkjmMrU~rmlBGsXvA$s8i>r>t(-Ac8~@%^PT z9u8iHRO4Zn6BBi{bPPHn&sW14ZNKBU1w7j%1f;`0;r*jnndh598f-7*<oO9iS96p}iWfta&YEKqQG zI#h0LSF9w_Jl^@opaKg;;k)2=Z`*g10{Y~6JFcP+Hl)aD7XFpL(Dz_LGwnO3y=e(2 zB%~IrpVZvPCg26a6aUKFdW1@ppMwY(mg<1;dh#ZgZ!1uAl9T)YznP$5;@P@&OfLT(Z&vEM({zQIgW`y|oCHmO%1u=Q== zE|W47We;K-or;AjnS6c$3AKS!BZqnTk4_Lo>hj+{ztkWtX#B8+n`@(e*PXGlrL)37 z`%%O8pq|uK|24oZ@>Z>Ymrr3-35+kU+z z`}|r`7Nh1jJ868*xoz+KxU8TZy#)=Zc+)KpFOf4lrYRhsl)rx>fR3te&7#&ZgYDkR zjgJ*@J}Yifup;TFD;H4*in?7uGKxTyN0KLS56bA})YZo`e}i3qX**TFA@yguw66=; z``MonHZgPo*yv*BH<@V;*zDo$qe%P!9X2TW9^P#=5G{71`wi$eQfD{)d107@+w=ay;%stP(GveX!lCaY2rBw}5)E6EqTcu{Ia zD9>P6^4igStdcrUrQ9FrnK0ni|4Ly2>XnM?^xF_kV~W$}Y|qQ{9vjvvMsJ6&!QNH` zeaytXC*}4p{C#Lh@$}Ce-dM@v9zeC14ld~3G7Giyn9&dG%28jzcB`8ZSv0+ltiyl* zMUrR>dcXQx@%ydJ3#UzS<4PPEN?2yhpH&zw^NWm8(3IThs9AX92=?RyPCaW|u=;I1<uF1Zq zA}Pz|L7oZZT-q;6N}yTGSMhO0ayI2gwg|cl*MI-)mm2q&MZ5$xHh#55w>N}W8n*6s z#c1@Viq6^Hf2;bsSo^#;SJG7rC+&8fc7XBmcE6Z&s>cr6Za>mYaYL;BfBV@PVL}Nf zov4GvV(+s&;rOUc4o_Euxf5?vugQbJ+7&>8fk-QeYc{|w((9myfKH1gPy6@WXg^gD^;;v#lHrCf8 zWB`%NZYz!A&ggSqi-=liDj{{As~y^NTpSLiyb;=gG*9Syrc?_O1v`DO>-sq|aOu-( z_i;NvwVX{MSh>;d?P7N`l~+sb^+JmIWWA?6cKg`7yu=FIU1!a>|Gq}FjD+A{D5hV( zw~?7}?xb$`_!~o1Un@s-7pTnxs7#_6*%Gudn?bj3-`rd_m4L_W919R_gUO~$7A^PxZfHSMrY4^Q{CWwxBTV@Vjz1%SdLSx zcaE1>!Pu3O6hW!i#;daQ1=hyiDTNcwa$wFt*sK@657mpY?cr2Z52_p%VxTXO_R}7z zR}1s-zmTFP#A8L;ILNJETbetB-V9Jv#DLUbM4ujNVv>%C3!;7eUPc5eza8X%LtQcM zPGMUyfIw1W`WDi+Zn1^lO~s0S1lU{*!_70P*%2tGmBBJsk6F@qWvZb;Lx{#+`qXxu zGM?7>(+Q6~t=WbJM?=-7`~!EgM&JTnaK7;y zjGaHJ01(8xTcidt<&{IDru?1LfrQYm#@ew$*d<7kS5Fzx%D$UqnxC0{sn6Z#_n)IbeHq2^Q;^n~ zG-BL4nk{@_TqDM42i)j=r*G#h(l(77~YAe#u$efMRl> z$u!p9m&*`5#TsnjAlFEfK|uyUu0G^+&A#BtyiZIiLBb%-aOJ`p(BIKSQ3-T7fnz zfVl&43)`SwxitZWKwdM|waOZL)7}=Loo9jTVOa!r`}p!^l?8!cy%xI2_!8|7Kv(Me zqzT$*K3X1DF}g=RF>_bn%F$FWu@}Ziys0LCG(PePfXx6?i?Ss0Jj5gBdnn41tHmYz zMOj!1KkohKUhm52H#j=~_^R}Gv3rt)bm)y2I<=IaU$b25w^v0G5*MY>#d}gOcK_LZ zvRM{j!>km2=c_{K&fg+;Ry_ka^N5z4+@X1Ze@-+4$v^!<*k;}F zpnh1h>=0ammk$SW78y5sMOjFnoXxXr0Ce8fT2SsiWNM$&Y14|}vMQyiYmR{H>7v=g zajvHkW478FqBLt7VFDG(xgxSg6FzI_{QT}Qk;lIb!S#>4+V6+Lu$mkd7MAE56n;jH zKKjbOz}ew_k-3`D@Sm+B{EXR7@PpkBjpn!++0Sp<;Qx^dP;HU;UN8zv)}?SH|SO(zd3I|IlAn$a=71}u$>;e+4r1?*y7kAaGB`I~YcbI%%05f0k z&8yH<7Q}sOWH@%81h<;gDy61Do5&gW7d#d}T)*kdX`zLVsa^S0%4ci|qGLH|2D5Bf z4f!f0IW|o}LSps~&%YkPcNy@isH)-p zZUt@ILzHuR7mKTG<+O|7NUrqj@C&?BDa^ICAfpQ<6yt?;Vme%qfNPcU&tZYI-3v|}W}9Qf)1K1a4K@pSAg z#axHu2EI56pNi9u6_p`S;vha;=IhBD4#h+gjKlrp?lq&UeaXtDl>`aTpvvuhC8IOT zG-YK_Aek+ccEU1op_4ZWIW9h0*mN~t*RAmWDlFv~ngzGm@lldSwMa@HN6}M>%Jy?a#+`T>G#v-nyY7Xv z4VRX4Q#mv^+LpY2HCxKk6C$y|=%ZzpbkFO}D>i%&CKc0MH-+96E(QEk(isYHyzlp2 zmR=G^_s-Mh=ohDYEgZ!weGe)-n^$`EEA4LcTaBCBD|zab-0fyq5!uyRot-F%B9HM& zheDmvSG798j8Op6=Z`W0ZtS+=Vy#ODY}CqRH#|{nH3-ucj`5%Wn>vmVo4O#*jJ?g5 zp*C4=t1lB&|BSq{G3vvG*Tk`Haw8I{t*f;XoMHFf zJ!rTjeU47~;G7nl(fH<*&Fo!1=u_C+9jM?w-|Ou%Up$IGKcHzlrR7=s3=xI@QVZf( z6c+ybn6W6sd<>i3YBsa~>)`voU@$)D#Te=b?wiBtACJ=^GA|!0z2CXy-MN~oHy7{u zJ?Y}-Hyf54pOb)qC5KAUqRU zP2!hz`P=6c0Iz{j^cB_z^TQjw=ZE^+7AB6&uB) zD7gK{!rBT|I3q9=G!1PgCxg$9V(+A+^OyxX%-N0!EN>7D9qfgMg~b6YY4p53WrN3l zNzZdW`Kk{cnx~;9nq%d=8tdDDz(oNTCkx8yi`Uj_W%( zVReYSvx&-y9EeY_Ugg%+A;a^*%9DhqH7lx0Fplu@CEc(M%?i71IoFPS9!&>##_(#*PRWZ7UeLUSE{0Tp9ak&?Y>0t2h9Is@S1kWi&>&%o zJzT%FyzidO^~)qdgO5(}IVTgBCeCX`>QcEO%{I-ayNtTmCdup6MxB?f3xkdNkrHXQ zxo`pEfuP`E#ElI3lBd)E%?bSf7-(#x7ynkfIP(xv?%1o=@=$x0Jj95wYk={PyYIS` z>XHq1TyYG?DMPUMdP=h?s-dSK1C*T)%f5p$pBsqgQC6Z8p)*F{;$i}XiQ4iTh#dKi?Ua>##?{nMwHh*?m$NYq=EETxWnx^r8(e)N!Rc%qbuxx1+ z5lLwg>28ozQM$WZq`S)ir3C?LknZjV>5}dSkuC|zJJ$x!z2Eoz|MQ%)&)KlqYpprQ z9OE7Ds3IPn#OLgNk^E7?$Ug??v7bLV2ZgYKo=u0gl9GYNo2~VwApDZjsKg-x#q0sp zQ+}LcIE;3X7py>LjmvAe}k^4}vl8s=uc57cAOHdc=_YZn(!Mz_3_csa}csFN2 z>JL=eZ3~K;Q$RlkyHt=>2V8Z1Tz_)r%N?&Pg$X2dY}rLS5U%ZQ&FiB9g($n~Pl}qU zKk64qA<_L;ms2SN8!KoPA9G~NQWw8gK{F;nPE2#<7KucBo^AU}KNlkzOxm1NrIkfs zmt>fAX0f21EMs(uVK^#h_^t^Pz(TurGV&ydU+=5*N^69SURmDnX ztGeKGUv~LqcN?)t`ZW)SwVvpMV!a`fs)IRVmt4o&E@dNA$4aD3PZ7MZ{wHTc2RhF# z-gKVDyWX}1jw>RDf7^Bk+O~5fiqzQF#|Zh6CD+Zy-hR~A4Jo%>H;gl4V4ZTn5Xe~s z-PQ0{fxJIn!^8_4c~ZZBDtTY^nbv2`k~ zg*mNeb1$V6p3#In+y4YIKSgcCV}{F+x(mV&YemO<;A2b+39mw;uF2ia^0DfJ(u^K}~X+c1IwoA-88 zS2bIaruA|1#uqQZL8~=*)}0@Y=|C-k59gTh7YoEAXu||NPMt@lYh8vcNUB+Tc)mdy z3M?Hl965{k=q`W<{2FiRKVG)68}U-LOflVyQ@Sh+z>n@T)eln)+Sl-dlFMo~l77PW z^;zEqR0VE1)_zBnm*r%ihJXs{flV8Skp{%{1n^g?SkUZ|HXGcO2YOF4@HcubMtUvc1I=M56Sl(hmC@ImGv=Rw~(Lpok@ zE=c598f@NE;1j&ZdSh{5zBKPg`oKHG;&mi_^yDi7+~4DbQ`+9LAvVuX1OVGr_xN2^ zqd*Vlk6sujepUDF#Ul_9MZ(=bIt3|7j?til*0aCM;f`lNUi?Ef2>lO#>f^sr3f&ES^rm&9l0+LbPB zK$u-68t9me1#ncCIQ*3Vj%PE$ApDQMKlKtKjyJUciQm1E}l}X7ho=Y$V(1 zxWgA8J!cs-haK%0qr$X&0I28G@Hi5P&BM%cV7}7|dMA7V?TOU4)bhI53_yytAqWX! zeB1{?m`j}9J2Hc6k+NMEmX!gLK;sWbkdJoTv}bQ>3V_|Kr(j+R*em{3 zL7CmOOcN$n|7=1FE3iMY!Thh>lGxwXA8HO;x9> z{s0|;48)@t;~TYDn*nM6=q9@ z(5=S^Xppx=P+xBvg>-5gEVQf3vjWM4XKHGl&BW|5(ClUNS0RAGP&Zn`zBn~-dNMQx zTh`SzYDMrZLFP0_zm2`pQ$Ql=fa;sUW{{R3^66&#Q{xC(EHgyu(Rtcsoz zQiy&9Xf;K(PYDbf<%+8(D36&m1!LcIiS5*H3eFb96%@0yBlv&2doqGXY35_ZttpsPCOu9*YcY$x2#vt)w=^j z!l3lFO$@xE1olzrEb^YB%1vW+SenKr3Yv2qyut1M)rGFJxsNVrr{pRY`!vFc&!7D* z=@JOIccyh0+5R0=>UOipjeF+M_q)A?Qe14yH7Hdvx<*?g@NPn=P$(Agdr}{^!+T$O zH=vDcupc@lUaLT$1;IbgF;H&QPQ!)da&SjLL8;An9DmN@QDmLJL{x*>%-U}At9BMOk3(&?fesLY`a~;j1~Ao!f;t15 zfl1mWZA3Tr6bw}wipJto(7UcCiqPrx-TS*DE{K~4fjowAhe-wPe+2-w>Uta@Ijf_WDLsx9kmOc!#XTR$Tb`b+Lyw}{qkXJt$coVgI$bI{k2uIZjNneRG(00;dm z5tjW#mtO>GmDb6$=uw@ZdZ4Brw2&jm*5H`e|B)bjx2agj2iB#Jd&c^F{zJE(8v=|n zR}!KHiuKfDLE|2`m+|R(iMpy!D9mSTc@oY{HIh%#qO`++rRU9i82$NysN5_)b_>!G zzz&O_%c=c9+D#)r2aGN*|daO6(f=nP2_GqI0hryOZcHD&zs;AInvo#nm9-IoP4gIcPj!ne08vH*yM=*WQ@jSN zaE$)wj%cBYTp+*m##qCKKjzt;`yg6rS_b;0hXPQQ!wRs@Lu(sKUDD<8an-W>*1&Y! zyQ5b(S5k#h^3n$2PsFp;YAh@R;egW~+*Xi!4^1QGHb-VmBX#{vPH_bgO&F7o08WGB zR?K7xez(jx8@^=x$25tkY>yB*WX1-Yx#~xSf^4cFu;l$<*7MB5z{=2$|MP;)=^xXg zb6FvPZp^+IKB$k_u;_9w&?AqfmzYXSLt}(EMCwRw8f~DP7eT&{lx&L z?C(b(7D0m9qYL{q0XqV>cf9-L-!4IGFY%rax}E@LBal|+m)~o8jGZzE+=x(sC$q`F z9e?+&*#;4~spKl`_SxofgK_QgGP5XMoj0hfx}O?Azp-|@b+lys2oZX#>B=G&(Nw*w zH!-azuVFX9op+dV@g{!_S&aYY$ z{#%GpZx7h+>Lxi*&ObH;`Yf{Uhd&Xsr2=&YZ+V16WUN@u1OxiG=MS=9c5X?uikuqTMw-&BSYRgXb<9N`dv73(> zUUUIhRVG>w{g;-%T?-c^K(XFbvo6TziFJGk1>boIxi5=9+!7maj`>jN$I(=XR!sM{ zY^30{s#LuwKf=w3-$t}5YrdT^V~8jpRlazU2SYFZRNPgMDoh3jNMfO)VMe;I zN>+|ZzTS?1Vi#3)EZG0h&IHs%+Ic?YsM_(EJD4Y?%{lQ#v+b!grOj~;T~lS=Qshc; zzC8c?eRP&1{rfxj?-FPmJ4Req1g+J1_0lFdCdQG*27&;UK_wAu6D1jP{^}KSKKiCp z<$0cJX;MhLsUShKjidTcsTM)2*XdAV;9zU@QsUM5>UUU_(j%3LHrhr;sII3N#00eZ z&MF!%^8WlDc&=Vy$pB`QaK&ugRaL>w3upCD`Tjj{f5H_D_)m_4Apq6k8^s>(aav4* z8Rfsctp3>K#4x{yuJqts(avO!y+3clU`g9^=;WCb${CC&Z09zQ-r?QLR-5*-6>P zfz@BBvFr&YWovw0R*59tWUe9h#)e|y^+U6IboH8b zM1PPGY~h5N3u*)@-)fDcubObk=HBkd)3X0k(+`#1rlzM?AHQditp)i7$K%7iy_6hi zw&iAHeKTWDa~>+mDFVN9g!U*xo-Nj20W_(-cA-Vp6`3$fS6?hiYvZ!6uZvn z%bnqru0aN4wTIRHI2OF20-#dOiI0!XKhfhz&8Y#T_{qGi;3B3Yqy~{AmrjtKB(>VT zooV}XyS_b;K;h`?-l;F4rFG)i#PGp&BRlX8W~Znqk>H<^Jq;F)N_Bwo!g=L~ThSU)AioxXdEHbirOawl! zRU3Kbq6ncq5U3I_x0=(oopBhex~;=|D}D4GLl%8y7O9&OthM?=Nkz+OeGbR)PPtGK zNe%`^OY{}wT@a+B`pb?aSyeenWT!dpq7aH$?n3o4NwHR z&5lIZLnFZwfKAqc@l2>u@|9cAPgEQ?&-HePBu5G)c`h7RZcUfV{pyL+FLg26rXrcL z#e#pn3dUEFb2ZyfeX-*)Ijj~o%+OjDuD5)%Y~DhYFvrWjl0N|` z{1Q2d@Awjz2^5~6jx})lelIp$0r=SQJ{=#Qh9a5d;Ly-Zqk-y>4ZQ5!{My~-JA?He zZbN&;eA!P5d*b=VxKrQ7?l)YWH5ke0?79LBGYE;Mls-tz@ZRI`m;5qqS$G;6i3^{0Lm~P&eQ`Kj`iRH zxtf|<+h5?HEB+Y+G{OpSe3%c@SS?;gyxMv+xn`XD#XBi7p7Bx;07xHNK+egAd`Xq)}QsH?`Z#p(Lgz>m&3=`Ag&gmQ%yx#;YL=h{I z9Kpo*_dks)fX)1@9x{R27R9f99KA z@DOGmcaZi{iSFMY)v)TkI`6;2_oKUmy%Da=ujpbw0Y)DU=onr7JQx)|UtV7Bim@hw z4h#H*-9l(URiu=aTr-WA@AZX-9qOWvtNA{V|MvkhRiw?!c$3cWA>!NBTeL%CqzQNH z%lz5c*d*-N3=jK$An5>NSkz?=u)be^a~M0A>f(%-oTDn0XAWd&`>7NNn;Q*n9BG3J z+HKY*K(f*enEg>UQ6Q+J1Yd^#9-UczTOTCYY@c2IekHi?9M8xCO>&9?7B=Gde&-6Y zgXs$3bfS7dR>BiO!G`8~43cN6wjj==Z4J_(L2|)oLoHjiC@04}6R1>G>t{bm#+q4m zK-`lS3 zLj&m9&Y`8_*q+@E7z=UFy#SrBnYx8*3DsU($*i#w!+vO8j=eW}9lq|cr_5J%6+sO$ zi7xBH1qOv*K> z2Sj%a`sd~I{Ubjk3mvPN@)@rhZ_QvM8 zKc`1s-_Mcb5z3^T>b1Yw^BI|kUnvL$t?VdKC$G1vd;=O+YW2@Ws(rRlM3M4-Ne&!TQL{t{e< zck{fNX4wz7KCS&XjyAK}o5^$a@BTr^3aOVvn|iAf-V*waL|cDvjEhzX)SGkOh#%xE zN_m*k_zkWMvdM*#Dj&Vkdx!4hc&ynGSS;8nS3cBza zp|_FT57V=5)8Mn-Jluyu?*h+N@*M6DUT?lVy8ht^_BS?z;#1wrr()}o^ri|XDkV|t zTB1pnu9lh~!EJJDc?%T{C6XNY#EWx&l^p(;|(ch9+`OPvOF(F(k& z-kCAPiG4N>K_+lJao~3RhK7vuAwjbSb2Bl5R~xAz`D{l!*_Tpi*JIYbc|24#1f9!1 z1wCb(3T;-K^&#?&&L1}lB*_-cvyPauMuxt#kqd_C@3ACa0DWc09q=tfq3s6mZgy`T zA3^7;k8KMfLdpGGu73Nu5VP#{1>C8HlE(ci?Sr5jmo@NFr+j-b+6`88f4g=p(Aags z4T4Cwe12l@9>%68XSLBZ`A`C{jB(+GV1dM&N4NNHE{ll? z?hrw5H68vQ7l|Ip-^p@{+Rj*-rP>{U2i%%;_&8wiAw>6!^sJ$0_)Pz{B;8vOz9sBl zj^A*uf2f25aG4UzBz(~Bi2_)E!Llkm@H>FJFP(?jJJ9e+{{6Yn&7a@(4E+Wl4}1%T z3l?2N=sOxr0GpbY_G^n80oKEz7y25d`wQ{EA0e2agIU7A>VzqRwZX@_G{rJdw~QapuYTl;o`fVl$`RUoCl#8^WuSl`U+yXxjJ2J zy%wb(hB3a*E8CJ8hV_@C;+pP!rXx?a)k_1zyYtVp*x%r<2=w1UY?cEG-|R$AT+C!qegN5y6dM`2J-> zMm(nsEwT!6ub;oIR%M^OqEQ}fW`-Ex7hK!5&%yqJY#5Tf$%jS);JJkEfk#R=z31DJ z_TN(mA)Wu_lX0GwZg-TU7jJf0Ww zxj*Y6QGc>|n}(5*QJTw5zbXOP`Ui# zr~j!2OjJ9ciaB@TNT$ETvOf_Pe|%65`!+8Am}0}_YiBoIXq{Wha$^%wiq;ly*-Aob zdzy)@PRN)I76KLO?=+M@Q4#*ii2m`rF9;~W#pc1N&wA7K3>q6qW5CbpJPg4LAe4^V z4PW9~P^6ad`Shy0XzR!A1Bw5A1o~Vd<2<_W8eO%$bRjkwc>Iu$Jxy2g~caVMbU-5GyLy7`XyK&Ac2HCSM5PmuE(k$0F*KiQ3L=-G& z)B|_6;J;~%g6S}5k=e~T_f@`(9W4rriBa5|0*RGT9A*_+`@<%wF5RQ7zb;L~LdTOQ zFKozRKDdSavT?C?uay}!40|@H#%3P<{%hj}zim>qlQSs=QmOZr|A|-#%;zM2^$Ut; zUa^0PXS{yOU>tn5)3*kjr|(V0)^EQxNB!X6m@va=c-!q zoBDIR?e3>E7wHUYtxvHWuSE=%#s`f4?C#r&eUhb7oGKN+HCFY2`iPE;>ma4Exb-T+ z#p`bhI_9gcPYWLAHFk0XVru&3m`RL-J{@HnEh=Xzo=x% zdfI|&o=~u9@H(t9hNtrzNF>_71Iue`haqNpL~e?S!(j1{&y`3d539_@JXv}jLj?#r z`&p+$g{xVg3;ZVi&=F6i6#R-=%>S*$H=!giuotpCSo;j^; zFHxOwoZn8?6?<7#1I@*7apn;9=~K0K!QL<#AD_%6BYeQGEPJMM=vm+9v%iUtH!mQ=XJvw6POBN&NXAd_Evix+Ow# za>=PdRRg~r{tz#%gz2e_yQ5V9wUX(m zoZ-O4+xq~rwWG(QQ$Y=t-5ZZ0`l5`H7if2nVwpPjDH9(Jh2)Zwe@4rrM+!XHM>pJ= zrRT9`Zm%qNjGwfXnOquVtFo$;XWd~25P~TEeu624AtKNMYYtp_9QIQ+y$%Xi{7L6z z)!2gJ=mJtx!8oeBdkrgfPP1P-K!VEOvDrK0&v0|f{O4#Ylv&hT5!{HTZ;nVJDk7`w zW;oga4NRDNx2eXe6ED#tN&FQaVagD~(ztOD<=C2;)SbF#&Ka6Z69S0cT&H1?Da#;OZY@|3z1Cl}El;Hyu^+ z-3qZ;oy=g4~kLz6aFWB z<(8omzB=S7UTpQgbv8VZo`CQoraR2;>O9W9wwr1__^9ZGLf782K zH90o?*yA?Q$UpvHV}kKuyx26){M0c*{6i;meekkz%`0stR(T+Y1f=*D1FU_hSX`(- zS=qOR_h1O5sK4*i8tGXkUzpj7r25b6-s@%>~ z7El@k|DC60BuL073b)M&xR>SsuEgI*bgu_0===fmts9j9kS!+8|Ad3+18S@ch&?dU zIzgE~D)JSEVzIna?#0t2K8vwt`Pmm7Qv%MYE~H(b9OsES zm1~_@$EfFUuZCtdxDAqoF9#rh3od#OZ5Mpcu3x;V5TrME~(xSb(#4eA=6%Q#o2410jGxGcP5SH zywyrjY&l}PHsn<}?K0OQ%wb>b|9;(u8UK2-h?tn6FapsaNetyr1+0#Ly#T~!6o>># zpTZwZR|>cV98~J$&^yJ)g-5Yz`=^PdP zmz&_(c~?@*EwxtRLvH-L*Q-^InyXnZ%fMu#svldSNZQdb9Vw9KD3UaujlBh5Hoz1U zsrQFZKuhb1$E|Ix_%!6~V;sMo@>A(}&=rgF%;WAf=6`lnL-%adYNbj|Aq@l|Sr4kJ z1~Y9GYOLgzE@}H{JvH-`rW@Kkh7fMH>O@DkFR=5wgb-EMvGU-s8~x=pD)yN|tQ+)j zO%wn%J0~DfK;l3|x4UmA)8Xr&(=C%0w&i2F1nv~n8MQVCUX@foKm zG~e?wn$@|amK$JUbvZ2=ELMu+Cmy=UN7vy{8u`2fOj@f;5y*gCD3wCT)&<_OLBlHGsbGm#W(^$`F;x;g@ z-@l7=jz?Kiul;6&^ASSMs6F328#bp(Bt*4s@}|!NSQc1JjX&SWI5|yHiM>O4zP9{x zro0C|8m(%rcv-oN1WnJ(+u%GKSUwzZx%^unpTmVgm_nCEHkYCw@=K8~x~65`8Smaj zgzl@c0tR><54?D#>0||!x!KU!{~$9d>gGgUOUKhutvQ!DMIf`E?r-Z`MJXsL?*VTi zUAmK&)28tH*m+X`uZwSyh2=H)N-}c*w*0nIOcuely*Ut6l=r+PM7s*wC5s&&uWZ|dAP;oVl6K+UxsSx+y zOacd)zXylj`v!+idBy&s{)%4W@0Ojr*qN)enTgV=3tvWHh(di93w=%Y;oA@iIq3S| zA3ztFg;gu2FivDvUQmp9$$6Z{cEKIK0tMC3ZiC7oR^OWd9pHOwwE@*{vMSvU;Kv_;7^bY zQgEh3C#pQce{fo5Wt(Yj+t?!d^Y2^3LnKMLjXFAs6m(GxtQbM30}i$=3=Ir!F=OyQ zg|IjhnsFRuN=Y{N=A4! zrNrd$1c`}Q(m2I^D){Pl-8o;#5(ACYaF=$ic0696V&JRSqn-VCwMIlvE7iPF$^^^l zt0*VwuRhNi?~JQ%Rs(3`C^iULe{fE2UNBF=iaoy~V1sZWSrRO~{X!)87com4v+*>A zt=yahjF-eR_}|Ch_~7VxKJ_{~I6!VoQ@NCRop+ntp?M4JkonO2dZ)+00m8g-fNr=2 z1jIn2Z}x@b<$?N=*C&jcDb(>eNM2F(4bkPq?;4{YnPy#FdZhnU>29ny2IhuP(QNf$ z?dvv~goO&Xvn5d-_am=;u{V&gEI8l0XFJo|fDz!rCU&#+sEj^QiC!X{xfhf8!>#X`B6MaBg{<%WOdkL&@VR#v zt+ZaAe(a|;I01LdClm|rLkgJQ?+r`crXlCxdI~@PvQ@xA?6Tm7ztJsegs@?;hBlk4 z)RxodY4<67)4|fJUZ-Tfe-bw@;4HwWDV-?v&=oMpB>TflrEqd|Wm4My;Yg7*RHSz|Vs;Hy8 zQv9-bx8dr`Z34G6~j<@!_smw zo!49UPc1Xvc&HyffrZ0#36;DJ?$kQ0(P=#-q2g-)-6<0N5WeHFb{6aYz8NLup+1$= z`yvRKfh!Az-Z9s_3ZmCONj9IVoy9lp9<|B--Z=w#0HGn%2@4+i*;Tb}^)UfCuZsP( zu-va%2eK2Od{E}t^j;*>{0{;R?SB$zjcAZ(v-=evqKq)#!T!2bwGmdF_D(G-WCRi% z)i{59z`Xr+m;%wH=j)9w2a(hoCrb{`{+|(&f6_}3{`d|4Q3+2kx_l%mJH48WRw1Ds z-GjteSYpzZUB!~3TOgp6Q*C9Fp6+&?+|=1i6Kl`M0dCdRM{s<3B`UpWirTRZrF(<^EPu2<6kmhw5Vb z`gETIb3rKG#RaL@$tseF`

&j#ftbGyp2&inRNmS~?4{3bCn2y}tHG^R#+H;buLg zABJ!Rka)vaWvxYf~x1-04$)etscJeYqd)Ct~Z)yQ$zu)3& zHtC(aXniiG+Xr8RCPsj`#uG7@?25f}YV#AG17|oA_G4u=Ha?z7qhjkzb8|Qr4l#uR zl_cJ50e!&_Pin{vuVi#j14-~91743!koiohbh)KA6$;yNw5U>sH1VN?D#a3B(eHyf zt+T`RmobyYKuz@FKu|K6xK~BT#a7W{Wp$-v?SL}*Pp_p8{S^#&?yKeKY| zsJc5I2sP#i+3eBqc%Fa5#wB8_CsmodGg5!4?P-=HZsf2t@~YZ#hk;+A*?@R)9~oyW zTfKZ}-_VO2IAWTN8M&)Qniffc~ zd#<47#f#y*0p^mD1H;~GVtL#A7rt7KW!dhIRzasr`iOClLHvS7#@_~#H*#ugc0rY+ z?Fl18++W0<9QCWKE7`36JZ&4{RMH>jbEC0t4BD{OPoWrg3r49}4wX5B{szX2{2X-l zx6_DE?!XLj;86njo+IF*8YZbS(PUM8aEwpYYstJNVeN*v`H_3tD&uGKwt&cVdI>#$su^#WR)SG1PM89n)ENm>G zAaqCC3a*VKYATem;uFVpEDES$ zH^-gOi4YlPOBQ|S>{GHA9vV=`Bl3w_O0CBrp8A<(&5Jy32GsIYB=sj{ZqZ{IwNm26F^|Prk0<{|oj&+YMOL|IJ#)D*ykmmO_?&*j@y&)A9Z84o1P0N23zovX;x3o1V91*7l=myBe zAFA?ql$U4XgthUzP8`3|dwINo58d=843Nw+@@lP}9`8MV2j2gOAmDHSI038#cpsu0 zU+wfiyoeZPP#a9|JK}bOwUW4S|MMqMxpn4W$mvDj)l-q>){;&X*sAE{2=tE>xIoD% zARl~=#Q1dX>JCqg*AM$Ewf?JK)f`3Aa{-Tx2h5kot$692I9v8|4S#|a3EHZypkNvq z5t6{w9}mEhRGY5PjQG6)>zMk#*74n~w2bEqG^{8(AS`v1#^~`M&=jT*wd0mR`ZzR0 z(9EmAeYV@=9aw2OgX&~`^4&A4vc1rC1q%#FmZ>>D4DiIeAI`TOtn;kj%3Fu(xL6%_j)`q_>D+XP5MQ2K;Z`5T^Vap5b7 zi)}waLu0Urp8fx3XU*?pgK=!!%W}tHW8Cmfbt?B;-M^WsI_oqUgl;~~S1^s@A&WB%O_eWTCKf z6#v^i1l#{Xp$|3Qj03rB?;X;u)Au|=h&sm2_#{O1QA~d_tN)?0V>|X@0v)o;-0u?y zGQVrNjVz(4D48}!+%ruTBFNrmU+$_~YWtbez1WO141xtm1X@dw#=HiQo7lrr-PN42@+1_XP3ytesr?h zgu01mjnrb_uf#D{+sAzeE}aG^HGqo#e9&-y`$?8H62kdE3Nv`p978Oe62eE>Ag0^%Xs zzVji!GLl5YRK3_}o_2D5qzVmY{CNG)-{QgNFOIcFEC%Z;UIS<`WF&KClK_myuzfBq z-(2oVa|iP!IQC=RzQMKPqMDvTKd4HNCnJ%;!+AeMN)IGt`0K)xo;)YGCRV0wLpsFr z&j)H0gxMKOM#iu2Fwt3!PVqm6wK@h2>M)~2u3)iyXGYu4#~ZI+>xf9&RHFh$ zn{>xwnyH3k8XPTq^)>LLoAPxF8A)fVrKZrWlE)B7)mly^3i!+&hK`v>iImgQw$RfB z1kdy)%8lwwQR_Qwr-~fj(`&Qq)GMWx)>+iK9&?Q{Zv*Z`L!l4S!od%|v2JsM2%_1e z0;WqhvucEY7Em@B_$%mY+@HL+V5c76`tjV!=%N@)r=2z`%SF9aRY(nUg|-v-3@vV^PtsU2pQiI+M2(3LKt!?}r=QoO81hD^DEvfA>U5wQo)j5DZJ;_C-Z> zy|-kRw$u8&t~?FS;cNn=gL3vz-yMD@3s4k3E`ihT9}XfIl}cyw zdxEyHFVp77h4vhr7uqj!IQIs>8^t|%VhJ*BJQ9F|mg1ep;eWg%5#_X6XMFPeHe>55A-&2)~ARtT0r^L0Elwr*Vx(N;|-NF zPD)D3HrJN6>Xsbk*J7PK-5zTW&S!D=G0sX#2gkYvqbjuB+SvO7n5q;#IesAwv3PA^ z<0Z1bYptetnEgT&QV}G-Qt42r>a{jz4&6{VB5?RTcg5BQazet>n^doXsdXthm#+i` zkV7CaSPPCrsMin4Yi7`A7x$-d)1a<(wW-uXI4$8=A49oIH*3>;uxOig&gfqGjEKqI z8gYpYN&Os+D9cXK(LxMNF3Z85bt6#L=s5aZzMkmlq?0XGqd&mBPR4;(}t{E{sNphDwSB9dJLuM6VWME`T5|hzaL*8&3_&>*t3A}JoRNar!v!lSWO^Eq*|spjyWBj z7#T`94PR4Da#Hpv8>g12GdXrFcuv5&C{Dn#uD-O|Xl4MHV+35X9PY;Z-cxYI_n{6# z`g$Sv;alyk0@SwdZD&N<@~suhTo&1OtGpY>sN^= zD94+#3yci$u@-gMj#P>|@+Jw|Z1&2h&SKNl@E;5c zzW1z~1EHtRu#cpVR_eQh>%Nb;>7=gy8x;PhjWWGORpq6wewAhmWI_^ zXVh}W4V(YPJs`0Fo&q||bq(PnA~iBq0z~|lzWx_xgnoUZF#oeD&{lZCo+m8UVgqxX z>kYNh`zEw(b9<4gU@W^zCQrue!}%wG$=OVKDv{A0S(by zd0pOdrl|?2P2lJfFHnhY*-$RXly~Aa7)Qm(*&pN$AJ3ZNvub!8fam_^(PdD@EKQWw zUxgu-jO{o9t=v)O1S`a#kY$o1?jT>e0ntL$s5A&E6fwYot`0b$n{;Y7d2(dVNcKrW zR!}5n{8!#Pr=-BUaFJ04s-)a@5T1VtR{bm9PqheM>P=b#3wM?1=2NLRd=&spYy;hg z8LYCB63ZS`c7R)cRHipGR;-QW>tG5TH-*TvQL}qBqr{_Tn0y{!GEJ^z?U^x;&G!y| zoOtq!9*Lby`kgn&MY1kMAZi#?JdDD6J;8yUi-q8#|bD*oh;v2zBz?8h#j=$5@DX#moH?5;!BT6 zOGmwSX6-k$Mwv>JDSh+_G1p*VeXWy#Aw&^i_l0sgf%BV$B%Qt4Urf*pyx*KK0y85F z-f(#nZIkap8fW=sT_AkN(Kn;L^BDHY@Q(MLgnsh zWqEBggt*arqFK}&E$5zyt?wj*Rt8@LxnQlA!@snl+uVh6krPasUf(RSxX<+Fb5Ysx zy7a;>EP*(O#?hkDJVHHmWU5kzUhL6{7U}6Dfc_41{=~F&Q*P+`uF(7@t2FCfa3l@~ z>U!AM?RfKR^iX@OZ{m1;iKNFO(om80_(0Qs>s#WbvYbelc*~Gw0pR#@goU{ljg`Zz z=H5OANKWccB`#c7_+$eXHNTob1+JxsB>_#?dtP}RHTDU80A>dS$}NgV1vnPa_D<2Q8=1zd)}P(8Ct*Qm->eyFo>i1$(v{hy~KT_toCst5#nh`awyekr@sU zy&(vxh?XR3I^|USf`2Voa-UnMM0_#F&zYSQ(eLJX1N<{px3=GzJ~%+DP-zpM?l|{U z_hKdIFlsDMTh9<45>TmT3Ta2K(R` zzP>=dH`CsH_)^+2@&PmO&p}bt#uwqJr=(f1wid>^NdrE3CdSBvO!ge-eFey`aHw8> z1P&*p;WA%>8CSa9 zV;|qPtVjq%RLrqQeIt2xE?%|8wIt%uH4-xhO4)`VwH+w^Jb7L)aRInnF2MZ^^R@xr zTzSJ&^8+|)icf&-yrd<%CR`gmfs#9l9fX24-k@F@(SNHXmFoJT%<;J86jfsg+y(G8 ztJ8N$c!2*n>Q-isL$&vrczPwpq0JiDN{hNzg$|Ct{+LpjsTS~@Z%sxhJ<{G0xINRP z_s&W1=f$Pd{f_OO@=I%>I@kFxa}ukm3F!u#U(w@~2*!C;YgHrGM>l-kChgm*F>pf^ zb%GxGNh3F8@YsRkN_RS0d0T0S7I<|K^39SxI9ER$<;Im9Rs;(!g;YfX9RWb`g4F&j ziMi`FRum>%wT?tb&xjqx?h|k?MCW(5dL&mc`&ZZcmBQcO*0guPZ59~LWDgDwn)JmT z1v}P%WNeMjF7`dUi~o3FAc$TEkMSOWh2$lqiMlbCXTf5uD@Ci4?c-UhLM8?q^afh2 zvqQ=`%6Ws{dzogbMjOsDQapZOah-88sdFl$)CCr1Q4)4t;cyUsgE0_DJ)L!}*~4=Z z(@ZEV+A3-6_C?8n0f-1Eg2bjJ zHQin=i6HNyh{Gb3d&EXx)1=MkChRiepr{1x18&f)=^6VrCyy(8|M36e>n+2o-n#B# zL6A*1qNE@pN=Qqmpp-~UN=k#$-5t{164Ko%-QC??!lvWDww`m}_w#;uzj+;x*R@yt z<{Wd(G3L^~S$i^4uqg^o=r81R6M({$^Mwn3H7Kx!{2yZmOwlv6P7KT82W;M$x_cGQ z#K?3IwWoM@pQV=H2FFjikkMFIgZ{d$;dXBRVm`lK64+$uOl!gHb56q^IHKL{7410A zGkqZ0TqhNtuf_h|*Lx_gtLuFJT0{9UtQ|xr!K(?n@cYp8F_)N}SD-*QKF2FEHG~E0 z$j3QMdDT@tbjjk1mi8~t{wBwqkmepeR4R_m;(x-5MiN6cD0hs=Km3Tc=1z=y_WHHd z^GeaYr>AY15U(syf(Cm7YBR089xXV`lhZ4wx=gPAS0DU!}Fi~Kh`A-J| zf>6>-prBwjT9vmp)PlCUD{fZlSPrG2q+)1HDmTIfb09mq^_1@9L1O=!vWWT=4i z@NxZMmZnw{H)moAY5tph__m}72Zt}+FU;Y~Qp6*8>naoCr9c~JYh>Q! z45E8*uYnlEkJk`gQdQvGS(y7A%J%hmNuuFzV?+z`>SB%m_X7XD$;g(dVoA5>e}xC` zO9VrQqt5^OG2H*QlcB!z(=cW|bRmW2>gB&|rO|)2BII+3rI;kNxOfcc7%43TT93{D;qr7g%0qVUSTeTWfX=j=i~1C`+o!e zeE@VeI{$^XAOLm4lht2GI~`9W8NJKb)_EERrF>qgqT}yxMjK2dCJ{kByfbI`M)AXQ z#7>ldC_CQuLf(%-ZkF%q@N4I2>n+M*aQ~K$dvm@@lkG#jXfg?X=hB}m{Y!D_bbICG z$*;^7t)$oay<#bxvS0Gvl}!3d9Z#u# zisf1dsj$pM-~sivxjSOYjjA4glcCVocwN({ybH_!60@)^fKbCcCN1jD68~viL(k25 zLtFd){~ab%q(IO1LeeWbEVyDXxNhs3)Nryz&>Ub&P}01oFd>&dF8odv}diWuzb?4#SNTv=g@0hn0oV+%#Lti2p?@!?r`Ky%SX3r9=ZSPX$eYVQm4-x z&v0Tn!?uWY+g(JT1ttKaGqiso8#6p8jk$!C56QUWm9j+pHFuVZ7>Gp!W*{ME9t8q>LW~m#jd#mBh#6Q}rg#h?1>R6pXq!iGv}0 z>o*GUwxD6&a7ghC#YXmV`rOy+2DY8hKy1hlkSqjtzM217 zWC|+rKo2XkCyTsKF~+0aLjYM#8q9yt4d=3Wskdk;byzfXT~#j7%yEzE6$kBo#-GZ4 zpJl6kJpjrrpt)HdApCv_0IKNS(c1IT29V7E*f_X4q?ZsX%|00*1aUam_3V;61v01cvxJ)dzq6vANo`)93}w$;4(Hl{%n#B$`;fRWO7g`7BY{3 zY4*h@>>b$-tHG&9n8qkoMkMCDyjy!na7pevbo5E9dQ#(Lg*b*JlV?sAcEu%k-=@FS zpEWhly1HmYyI#g^cU0Z&@IB%BO$R~+JmdF9<2t%`w`uvGYT928)^)M)RSYyl-lxE28e`b*&CUO|W<4!%yIVT| z!6e~*-7bH6vi&y6PgRazRUFypM&KXL8RvhlKMj;bwn$ zhiOk>PIxC&7xHzqz}7zTUndU%K7KC8;b^dat;nN4YTpO&1OvoDCy2ITcJpe`-1 zzfHWizP43=3`ownR0}|RGLZFe@Vh7Oz0&DlhUUct-u=iPr{~Hl=^+N`Q|{4ZZoKR4 zHq?Caa`t6bLI@!g_i}ox1fW2KY_~hDffer)Kuu%^2J&KjR8k_eyaZj?DIH-@5DADQ zCvxXa(2=@+Ry;YCE-^a`y;POj> z?d>Dyv%b0iDVngN;emx!4GWJi+;K_~l_Zgt2u)i2RX<)xj?&=7^^nRq!fE`l18mo$ zd$Z8<@~kXV?u6&2TPwhDl938ljG$&)K+dwTP$!rz)ELXyFD)-70MJehr@q|+A{w^%cp2Y+)a#Pp^HDQn!sLDa z7Au%01I&SZ*B6Vw@3_t5^r|nDi=P_QqQKD#NsQYAAbGz^q+Sv}_1S2BQra2f*gCoeySJI+yPz8DILjdSv;{Q+Lz^&PyL4ANJwx|YTnZcn z@_DvHh0%+Dode+9yh55Q>38+~dzco0kfi^tnhnUB@=62l zwVyHp?J~wcefuNY)O(+v4=SO^KnO2YwXpsNTBREOD;Ecb;O}j1ia1YHDyViq($(9o zY^bj?a^fRQM0MMESwu)xAfBmvyCu!h0mu8gt+p zAO71R`C*tJC1Hq*0KZ*VnSzN?^c~RW(PDTpLkF-l)kalb1+Y`nv!k){Yk8II`K%Uc zK)AgFi0Mzf8h|%% zy7YkQ(FL)9qZ=9Y)ogGE&N6QFmRp@6(yPnp?tGsRZ+2tA_*!P8Y)uG&n*OT8+U zj30SxWMx-j;;2^P<=T?FbM)a%y{PedeCz%TUgPP0Lv-65O{SEjH|?w@Bv_K5PXk*U zLD$E7&0%9g*UL*W4Vqkjfz(N+#|xQduYlEmW9^xE7|gx@;|GQn z*(xHuG=YWQtg8KC2oOW2u#OuRH7rUMgeGIA#3uDn;8!omoNbfga2~xO*g!uHZs%0!!q1P%g=?I6@ z{=Ly>wIXKhq^cU)0#x^K!+QHq+X?1dFPHDI1{ z%p>1H158}Ja>>UZ(H}csHQrnw7A0)_kYJt%V@+Vuq^dbw^aLaf6KOZ7s#EhVAzJftJnjqfFI@U)SU6fWrrx}q^}ajN_W}Q-n*@lJ z5*~-Je}jec2cVLvvD4-+GL?QCP1%zA07tCkQ~Xk=8VE6L7JM2#3p>9sLE_PzlJ(s$gfsAfQFuAnpKKJbs{}1${@f!8%gMBiM}X{otFp7N z$`Q$L-QsdDNU6I9=un-<3nKLod>=v(!rbn#W+IPu@s*%qTbSO3>J^fp2PtQ00gRf; zV4!6UtL(T-JnPF_v%S1km}o+v7&(#0QV1{s&kw~5kJX#9!)%{-+7%2&Em{p#8@$)t zUSLshK2*B1>N?!*!LIFzVKCo4#GJ*U#kd5IC-{M{S00ROsTLx@fPI{I_(QG;ge_VM z?4Mk18w%eq4fOb_RtB*2h}LCdJ?YQ()V6K0}G}y!;#QV0P?Q6q>BDB9S-aQX}9xVvI~r5 z&DY*Nstr0F8RU;<9FM_Bjhx4swT)(&claq;*EkY!zY&$W48#_kYUDKB5;i%D_os($ zS9N#AlGFP@q1tsM;ra^X)@WYhk)qAM*jLXOYr%n~k&B1Ovo2c*DSn**AG^6h8%7+C;= z6lnL1RK$RG&(jRGw5m#2Tjm~xP^EoUUa{oLRb)CiJevQ@7N*b6($NSbpW^N^36nGs z0E(QBkS5ihVyVYvsJtQ@x)uyl0wqxod|lHl-JfY@QGTFy*W)IkUsMv&0M*8-3ek9xZfooF`6M7P8BhsstwEC!=esD{gtLz>k)zSMYro3|NTuF z^5-#y$zF8+rdHVCl3a?^``HR0n~BonS=`lgc;X#Kr^N#Rtj6pJh^IF+N7G{W;pg|z z=xrY^2$#(-E6}o=vqba*+E#PPCNTN3>mm)l!`j`PY;H6%|7 z&tI9wD7&|@Z>3C{z$1W40!Ru$Y`#j2k4b1NYuU_jUOLN9Sddj56b}sO2&d-4%Sbk; z-rAZMZI>F#H2k~@Ga zuvHNNyqcD!0rOK1({W9We4X<8u17~N$6Tulq1O(Pf86Kb9)5J!3%oZ*2~L9E{6! zo;25*t=axspa&=vW0c9lexd~%F5mZ>wUOm7IrAkXRp$ZC%?uA{%{dltHok(|;R7M} zf`rL#R2hD4uHgoQ+@OS;Ka%?~`6VzlG80ZGN-E>9hszVb$DEu5w6zbv81u&n}T*fS}nMzCBy|CnlN;kae7d zM&4oAwTGI2-!;F3dIyuZcnOrWl99K;!kW0K)bed@{m<)1{$G z#aI8I-9;Ue;em`#{&n)Hsm@f4EJvnp0TryaXOAK#e zPV6qOp4yBu9)wF2U8Iae+~9(^0W-ry=cckLDd>tXR?L5y1?HaLvyP;{9g-rR0*~y- zsCGUq7GD5s`dMo7!d*RLptNspf?rNns0Xe6%T_te+Tp`@j*{VwPnQ-z zw*&l8H#T=LHL_b`kdHcnBmXtS7I6JYCg0^(1dmSbKJ2?a8W@c1%(+RfZ;!ycirW7y7RE-`tYW`p-5BB5_;bQ<-ZPw zc@{1{X|2{41+xVe$g6DG9y95nuMX^NCV&Ru4PY&QM;0nL^;xxoS&&qqz}!~O*?*;e z1A0e{j88|6u<)rOvirj(EKP+?D;%$PdTEp?>PsH&dhRI5gE1~bV z4S)%4ye{RhSbzwDFR5sP&gGn5P6pU{dx2D4s#D0&;MfCmJ*i6bb=DYlV*$@i-Q!HN zdA|?w6yIQWet7Dm?)zI|zMH|tplgl~?)?Uu_8eL|CA%H&Xn3=1NLPPGKtR+%f=s(M ziF*Pc@)+|m)M%lZ#Aw+ z)owonM0g`W)(^Q5O+B)6M}f}{d`Y-RN9laFr=ogT)GM+Jlx(znpX8KdBJ_ou&DNNpNype=j)q2i-k9G#fp%$-sQ`UFR({_fmKy%)JQf zJjE`DDBgV6!U8j?gbG@83`{}_06dBvp6p}gR5tf@cJwZsCPf#SkLCFD+^#IrS7ko~ zS~^YZW7Ns#~3eD&`3$Qu|A`NI&SN@QTLgXIP+J0!%Bo4XmxtpyxpgS%XfZ zKy~EPexer+ilZC z2)Lhy5eMJ*=pscJ9bJ>4d=_1LbYFM^g8E9C>M`itGc}#G5rQyv51^b3w{_kF3;=|4 zm`~6A&3v}*EZH)>k(O26KnNolOq0uOpe|p-LSuf|aFX@vqPo2AG>X4QwyD>#zyns2@F_ARc`xF+V7GdoC`m2UjlZ(2hr${ zfbn2Lk8FQVTRu)ssb&kTdDb5DN+^{Ms7jNpir&;)IvY~^kMGJ|Z?nPdK!tKuh2y<| z0Ut#8$K}GdqZxBS#;O|$n88BfkHJ?f+6YcmFl(AZv_oU=MRETwJ<#q8={?bW37g~f zhiA)CTN=GQ>_R%c8y_Rj929uMeJ4Bmnr7drN~etHWX!YUI}|C??A|m!!eR<@^<#c+*A`~s-R}YaXRjGuCAA0oUUZn6)kCFSCZR~2=P;(g114hn&I9pr zcZ)y_c}&cP=&in<0i98^F*9Akq2ep?1iJLB{`gsYRIt`h@K>JLyYlb0p}FPdcN$SC zx5+P@=QA^jSc7sWZfhWLn#hvGj%8ai>$4T=XBjCy!BumHO17t-!21CvKk6X_RhfO- ztuR-=6f_{T-Ja?FV^aV~3+Nz9d*~R@((+f7H0?f46kWdml+yzqn!3twV@=n+1z!#& zEbSj6_P7vkFZhQ_6fN93yN+U2x5f@D5tWHMeu@6@f)#<3Rt~nRS1?T^L==qEr^V;A zVrqq;fIxC}9X8im`5U-ghw#v(KRn?^HZooIbAXyOU?x5LAm=Vcq}c0FaTkVxk-}sD z>#HqS&QNv}9esfZT5tku86cmqkE)t)n;N78WRgnPD<2|h(lyoZ!l_=R<03fGrx0@)hTHXrr(r) zyQY5kQR4NFpHxq7V59M}+0S8(3lA}Hm`6OQZ#@J)ASZqlWX!NSz2oheVTjvo+AXcM zF>~mTy{M$aa^jOyHbAQ~!8h2X-$I2`MYJ)Q&0+Snwy^UCN;`v^LmQIX#6e=xSM(Q8 zs`lQtHlxfrUx$hs^i5igrYudStQiP{L8sT_34sy>&a^KZ)t0Qc06_uL!7Wno(aeP0 z!`u5f1BL0P*+WT|o=Qif-Zuz|45^DLADSJ1W7Ei-lD*l~_;emK|eJJb&i%4J)zr7Gv|Mb=iWOQ_W*> z8Rt|?7#{W(HX#-q4n>=u&Mrg7y(8Of8B0Rc^nCJk^0ZQ#;mg}ve!P!XE6ENkFLw%C98daEGV*uG}PMF?rsE zQZc`;PHD(GXqFg#?WNAy=8+G!M48pEiQ)>^oB5zqRmli~kj}%_tJ6|+aY08Ig z|295sn?go3#7FcpmF<1R1lzjB8SlP)wT~ENekHn} z9EQGsE)b2_^>ACp&dE?H=TNhJ8?zIaG)lzr-8C<$Z;xS}(jO5ngS>fBvdln=rfh6f zR&KQN{=mxNkOuoR&$b_~r%g(U-Y6H3)nEl&e}kW7<)K8@EENQPIKWLwp|3IE;q{o5 zEgu0kUvnN^Z_Tqp;^az> zI#v1)%3Wq&78OwhVkTRe?1rs{kYl++BJmD|-VHOfMdTDQ`d`nWP&4!9IO|tEHGe6! z;Cgz{yTixb_~)4CJs-#@Uqt+xvBa9M7RKLDP%vSb$-~kYvLhcEVd0MS6(K<2@_2w- zW!`x6<99;tIRb^uk>y>z=}a;8mJwC<54y4uNeoP)*VJ29II>tw%KGSu&W6JK z<@@DVjP7NnXafN_)n5+N7pt8H92sTRP-7?pc9%*fU$R*ytj9_Iprig3>!%$^CTk)s z?h$pzrz*#y^^7HgU`EyOxQxyF%uvNDxp1t@bc3D)5l*nV7*c;b!?N~vjp!r7#%iNj z!?y{af>#QXe%h4$px0kc&IHZiIKJhO``=VpFiJ;OGu>)e#P*Kf<~-XbhUSi2&GM|5>8zMlTT-B!bu;D#vVA#U64?{C9Jnhe@H zIx{BfDk}KkSz7;n?f?ZIqJICAl?y>(pQ<=%GZgq?A2+P_;WLp;UTj|hXY~RKFE(c7 z+1cs7=n!`)?^0&Yl&D|j$FDUb$RSoV@Nyc?Pf=YGpL{@6EaLy&Hw!oE@n*SkO#x$& z##J=X<6Zj7(er!GkLaOPqa*bkfuUuoB@upMz7H6MuFg$kY;#v0-=8Q-&K-W2&WsWP z8%$Arkxka^d(4`zSynnvz2|xq_Wtvnc{7%#+L?Ke3wV6`v8FvL^?~abRoS&+WnGyK9Hx+(t3az+?Vh9@{z-z_S-u3Te8SwoR4 z##kzE(cga$SCl6>ci}v7!4A=YFnzKQErrU@r%kA;14Bj%=w{C7xYKVJcvFz+b z{hj%&!z{|(=i&TM5@pUdRoMD=K7)uai$%*Y^HWl)==z&zphE42=6y@o@TzM4rt9$j z*UPTY)c)K!Z-e9fO0J22bS-l-%h3p8Z6QNU=Icqp{A?-TgsZBPLivDh44Oy;4}nPX zqt3%X*hb%!m{``)KjmH)5G5=t%F~i+=oJdG`5Xno4iI>r8!(wq9cYAUPoFz~GIakv zW|3cL7*nQ#x4e0}rROt+4^gH0+gl%noW=4-XGyBvRmGG%QA25XXXeBSlVp|;eV^GZ zCHw7sGA$46+s4{F0F;h>Cx(eP}tuVs$@sN7F6jjSY=#-}9%w zx0rlFevBk3KXvuc5R{#MQQ%=TIO=KdO{FCjD%;z$5iqKVZv0H@$?fAeuYSxk$na;W z5ZO800-q6WetSa~`hjeyR)BYr2~g#~PG#304iB+UrPe6^r;fB>}WD zmHTp~Dj(nABc#K-qZ=zNm)y^drBxEU_Pkk=@&K9X!R|yp`7b*zg8LhtItyLBb9lxe z6n3?*q~TeMb6<*3#XSGY0+a(=C5`_R7@LR~p1@EmQC$kJ1cqLL%5 z;|CvmC)ESpU~$+FA$;hL_|4#R&r6w~Jv`7AF&_ePD;imoplqrNECeCXW=~tq;DQ7W z+WS^q8Z_Vj2{sNKQBghR`coOZInu=IP-DxXv`(+AmGC@rEhcb#*{2?c?UaW$+iJS{ z7^XCO`do2ou8rQgAcyc&I!b!Ho*u_}v6q)9jpTfV@r9X0Dyu}9EipX4Q0^c+m>i1~ zVZ-SfkuUc5q7AA9IpU4(RoW(*V$nr^HfdpD?oreD#&L6tBYSu&k_XK`z!tnu-1(L6 zLb6F~c)viV4(amz!G8opn&C}I^x3gH;>q%x>yr5EBD+?Og$Qg(1>PBoBM{qgo7Elg zFt0zZ{&JSZP1Z{4c%WC!#cwsp28P1&hVUN0O0>E{^vTn-RqPT*O;3u_v-tk0+T|8S z{{9YihXc%8sjlVD!yhSCktOEWeU2k`7+4MGNEcN?^#b3d_7iMZtr+30E7+=+16bs# zrbd=7oG}g-P%31vCg_PiD9Ht1v^_ti>UG6IfD=sJg%qRK?lSVL>#;hFX{~K;TJg&D ze4Z|~rkzssy2aokeylN3;(b;tFTjkAPzwk5ET9g~>P6`{Nb%tCwg{H_LbXj5+ApZD zV`pVQU&L_y<<{@g@cEt5C{*Or;heZYeedEd!b;`ZN{ZZ<*5TzIZln+=09!5&5`GFA zSYKAyK4YUNk#LO@LpXMdjK+JE#LnpYATILkikRS=yI?Cdw&Mf%Uc*<;%iWHiMlY@@ z^}kjB`Q6qC?*nXK{EF@W$elDWL20uTGEBq8J%r~^2f4uV!&9(CjQh|CojH+jFt6Es zhxU|tZqg^&shRpm5`Jb+xz#>dQ|eWfw+57{#(n!m3o&ls)J=;xsVn)4Z?LFjQDZT6 zHoU%sjJv?h3kV^Od$u9=p3x^X$E9atVG5H7npHF4xKmA@J zF77v)nM$u}vmA;~^gU`66_e+)dc<;3Ke&tH|%-NQ6>z zEPQ_p-}L@^Q;?;j4J|}UHix>1nIGkfxN81-`l64o@)Ye&jeU<=`P)b5&oV6(Z3>e` z#Wg$xswh9mslU7PqtOBHnOWs9SRNxUjk~w_oheYI;&9k_muT*>240}-Ksuisi( z-y}7u+G#z1nm5N)P9#eK?x~xQ+k9Z(r43hD$LI%jg=sx`hPn-mr=u7G`q9>jABYf1;w%Dtvq8=V6NulW`Grr z>}u1bsbh6?-~tgzKmN3hx!1wPZhfsAz@@kHOq{rkO#AEgukb`6>Yt*Y`xr%~q@+0N zi9YH+CkK@tVYm@MBEGR#%L!W?T9AY4`|0nI&*D(PPkoK86%%>bcub7lR>FDu-Lh7p zw6c=)o6Cj5`gwCiQJ}TZ{P(D!U%>zwEWcIjL45+RDeWxt({J`=vxtj_hMf*qUlP;3 zyjnGj8=k&L^_R)Bvw*!JXH`kb3}(CUDVJOBS`?BV6SbyKmy`ITmq1B&voTQV3O;k`9%w2|; zzq_H&CAfu%y-+#dCh&;mR8~dH-&XP4ZK7GSNweu^uFmbLgEkC9R>vNO2&_zbMq=tn z2z{)Y=!STMH~JKmnOxnmb1AHuu;8bsr!Vv;`o52CWnV7-eB+B(`u1$9?9k^2TDLsF zWI_I5n>w5rKE7NfPvw^FqaG(1xK*q;x{w=qEiI8rEgRUplHw|la9l`&C|DJygz+2Y z*MvtzpI*h9M0K>2*VdB}mQwgGEq1bb?xK{x*HMK0=;Y?`-MgFT{WRNooJ)uV`K8*U zBaXpwgxJdBRIu6&AX5&zO{l@A0_nH<>}KTQoC$bWe?I1KkU85~{HYRWiA@|1iFPx> z4r$KRX$6qKC~)EG8U!!t%MS?W=5{E+`T;m!9K5#%}iHjZW`uw{6=i}UP z85t3+v;1vP@ZV2rkLq+KuA*1*dboSOE1-k{YcB6*#ve1T4J6|(Vl0g-wROekcFzL2 z0}|ResNq?%oZSq`WRPOD&A9dImn;jY_Hn|BaXvxb zsmkAHKN)yBA^D3sggCTMjzQ?xQrM5@cO;-}^NClGsG#+kPFP(DE8biX`I6#u(L$;9 zPVyjZBgdPTz3-)APJmZ9t-gDALzPC>%=OWzT=NP#vO9!Hw)ykDXQ+UFbafr7h}Zzd z6F3_s+uQZ*JnA4)Hp0uxH?^z=|0mPfMEn~p$we$}QyKJZkm1f$+ViRm1}=V#UY?Rw zBZYTyVCDJ5(N3^Dr_RKBBI(5x_A4K2Li{t!B~3Vt?vR2McS|W^Ga9vK+sW-Mh1|Kc zDC{6fNupgo2M`wmuP&su5fpm7jZ{<>+d0lLPeXT05Ed5b0i3@)4vWEBRj3X~?ESsE z@qlz0!Dgz>y{Mr%XbI2@v|Q7aX*G1M;~ZJWsJ%Pz;hM5F5#4@#n7(uFZ87u8w1?Yo4Bz#7HT z(!7$+?gl!jPJgu_w|*{DsT+$;PiKCmaYe&phBFPBkyj~^@iR~D%5kztq~O0x%PCp|$qo^Yz3g&BBBI28^{INE z)qv>a($c%mo%n2BYRIWPg7WrY!2O21Qul8o6@dB4hwKriCq`on(8Is)5-}!6Sx;%?jB?*R<+%doljq+Em}hJR@70tKu^6)6_l@Bk9yjYARhF1KH8a*sP^%@B<@Ne_w!-v(AidaM zNRL=I?~k3)v7Ev&hx$^5Ba9p(LI*E|C$eQoEbor&| zBWJX0+dELDxnn##x*kb6B6#6%Yz$Q*Lt8X{Ram#)f-x>efeKPfFGTWoOUjA&w$&}c zAkp~+2LDE+oAFl)aAQftmf|`O+5nZbZ}$u1$5MB|u+I6u01ALQgqZG%3hlOGMJs9Z zyLlIyO1J3a3Q^U2@!XNKxT|T|n!!8_G{jWiv<~-(@?`jUK_MT zPwP@wT$D}9^4yZx1<6mc3zR*79(K60`rZD_U8`GmHwBdxV<2$Z8DQZg-OB^!d ztv%^j^EGRaAMbr_HAOpT*O~FTAONfVv$rbB?jr zs66)ctSr)+h7-&E{(Ee>eG;J$6hzo?1?C7WXR$nZK~s>Qa;Fp@6jajsB=0Y7gNoUh zDOu`vn2_h%zs zXR;lEG}kTsVhu4Y%Wq;W`ST|~Koi3%&*tzP9k(Oy$xm_V z_X0+ADwpK^9X53jqZBG~a`Of8_iYd8pn8>?e#1S`uC0=lgHEIXVi8t&V?}zoiC0qv zJ9kKA)B36&AMs+or^I`IYf^Y}yz>g#LqH5$xx}P|!jHprXwug3#39jc#{OdWovXQv z$^c*)1q=|TvM&;JjU1o>i&g_Q>wAwdN!$|-9n#kmzAAux?eTW0Kq)!hyjGK|wa8CxDrY;hc1oja`Q*zyd0D{<-Kiu5efd2+UrH?oU#h zWEuk&F=&kskCB!Fb`Mah2?io^r~}^3Y~>cVg(#7}g_@&AzMHeqCNfdkxOl#%c~6cb z7Bn0DVcp@e>lN1ydp-&~w{Sa4*!veJTP)wLXY|%=-*7pTqR93YB_b(axjyAxX~2=< zBxfOIrwEL}Q(6f{NiA*(cKC=_*JqL%uW>?}%0Q5=?57Pa?Y&2EXCnVltVh|H@CF~% zH4)#+u;KR=ok#5BQNUU_(7_90xA*bLxLmM%4+qNqGGM6A=8ODdWNW!Bt*3vriCES; zl5crU?A2Qs?SEM8M(=&9j9@2ece371OO>L(jCRg@Rr(tjXMj2Ll_2>HmL0Syln!UN zW0ou{%bq4Xy&!mmRIvFAq*|Rpl$Zm%OXN9AT7H-A{#0F2VfvFb@`tJ;H9K#q18ETA zgut739YW0uLvFK$+?W*=<8b)bN*q^oA)nG?B_(EUP1`FU9|*TbSBT&c^^TR_qkd=I zc(9iEjZORH>RMB=6MdE=?P66)_EN%b`%9(SwmMv`LuR z#R+($Y4yFCOy?@Jd8JSN%e2X~1I~N>THNe@E18sNWw&}!QK2iVDf&XK&E%_T;qJwd z>38bpM2vKLJlRKxiPrpgF*Zi(5V?~Ex1)^hDvcQ_^ukmwrqh;EqM zeG5;au$FYpuHE>u{Iq@ez}PV)k7P9Y#A0-^q}$|?OprIr+G$VgZK(Hzk`d=Dr5-Xo z{j=uViP@rrt5TbR+xo^rvnsxySLHh?Qn$6+pXBb%x8sXk1)M5E*p?a>#^!4jp&FgbL zbAFi*w~0l)=@IN2C^h2@@Nebt#VBR>(#$Vvp)bYX-9Yxd5jCju=4nQqvA+(~qZvj> zdr-YGt$J@qj81#EhaTy4Ny_s%*?6jeS-rYh*Yaiwd&~JU$*+5WQjy6)QubUqJ$3;J zH>^eaX~uPXwS~9?G4*=ev)+l***LL%qbg+MUALmD7#)=gRCKeKY9gqrQUnx%{aOdH z{qhM!ofi!C`abXJL`tyQeH@-;GV|iVZHS}T5|GC=GBTHabA0}2l#i+MI{B@ZTqp742da!g9hokn0x(Q&*^|r1E=m9OY0mZlS(w_8E(N0XMS*h zh0F8e+6lXXRaV?O-q&q3L$=ms=KE)zn8z`HE+((pQjzHTun_2pdS)Q$U= zbN9aAK8HwH5VriSV&06Lzc$MYkA4D{kpiYWt!?Z_qUFkm<;nKLG1H8 zac@|F2QgeI6co;Boz?v~%E(JpfdTc`+SDOWLy6Aw>4rc(LIoyIcvBmMw`38-60^ni z8JV+3v=DJ^8gFzMH+~D5?6D3Z!YY!r?$V7_I$omw9Z1|B@uuPF@6irib2hf|oXq}r zJ;)he<)@Omx_U3ac1w^NcP==C=Cf=r9h8X^{>BXH)Ks%6i%&xhA=zt!rFezr^=lQV z@xiS7ZTz*B)*{AMk<0C;%+-^Qf^Bop^QZUyzro)wCU@{HOif-UN=+MDmfsa6T_3Oo zcDf?PTFJQ*eHlE4qLf%#20PCb@f+Ab_SMyOC@R5`@E~6b6_6oJQE)ZcYt1b(a;{Rj zKMmd7Z6&T!sdu30t0q$#o%kU05t+m@XpqZey0t7f+cu(elbMZy!}*)zpkWXLeLC`| z+YWLfdM#a{$j*)E4ueIzz3T&NURhL~VppEU*X?qhk?7m9q)1~7dzUP^RDY&AUc8*8j9*O8wS&K;Yh=G^49M`OwHaNk+gaSm zIBIxY|J)J*OQ!PFWNq;RvhoPS7&>oGJLR?_qanPW{-&SIZUxL+y+Vn@* zo!`O@jkv>FXXpIwY+I%CP5lJX<5Fx&E)A)p#|bocoj#EWS3FLycJyEQz^l})1TGC= zS;-x+u+ll(2E(HsNwt}?;KYhAmyWDRPr4}`I{K$|bkp#M=Zx>ZSD8Fbo7w(lE!jo5 zH&y9f*>BY3%uQx4o7*#hS2lR(OHp*_Mm_S=2aLllIF9Ytv866n=?CBU+w^X+SB}Wp z18Xl0DKOn1;KcfA9@@*18Jr9zA)63~BVYaYC50xTWRk$e?)vD! znf<8toG*J<^80ZOOOB|))BU~edgh)>{ntZIq6-O9j& zjSbgcX(2etoCm{kIK)ih6N?IQ0>^`zO-!@2Iu=OuITyXX6ndV)F5!fb&(GYg|Sijum=gIy9H ziI=ao`T9#>tWuHo!wp%veCzfMZ^n88W*X2C844KtT}<@Y(Bx;gZ*LU;S%v74{qI4f zd_l354?n8R(9*Ue%I4-Ke`XWaIjCEfp+b)XMh&8SIG+Re=q@!DSX5Bt_2|;&Z#m)9 z?v~>V>+_vFci?WcwEL5$@*k3%jz?$Gssl|Wq8#!21&y%LaO2Yv&zOl7yQ`lVbuFA{ z7vvI8LA8SMZu^*%R^FyoYzphEOWe&(F~tQ%_xL8^YH8!bBg)Ye)|>@)UBCXE*_9t47+x8=@gpBoF zRPEt@giV_k0K2E^V>Ve)d~?A#G24&9p6u z&K4&-*{=43TAZZT+#^x}EGYQI)oTR#Llyf}^&=QpN!kKuA}v;?go{FzTMp1Splk^P z;Uc?&KVvyLQUZhD4KUpbEc!}{_WcQ~NS4`-j#Jd&ZTU^n(O1Le4`o0??ic$;Kzqk> zO_vMY5z+~aHt;tYWe*Kx{@c_sPmiwnGA#?R~RMo;(^UGRvJ%?wz1vpBI zsEg}96D@UH9BsztA0jN|Z5vYqCp38Je(@mfa|+e*)l{v+VxIN_Vnn}1$!{>lWZFgA zGd!1%8@G=YcNe}5J}>AwY0Ajk^xeZX$z-$p=f@XPa93SCB}sNkrC;Esn)jM5zF|)V zL$q`Lfg*L9)CI+hLh@;H@Y?CYB5Kwbq+Bd~V?Z_!_XsCkDh~ThnN3f@6&b<{O#*d%KmAfF$oJ0H(1xvzVc@SYO z>*>7WYn>%;wD1mb1HxJo$Nz0$KqoT;okPve0-ExAB{%x{No{c8DZ-b(zb!ERd0zTG zDL+=Vr{2u5wxQcsEGb^K!jdeq{plqtK~Ac(~(Ic!VbH`$g}(FFq@u{F73I$(v!R)l;w*bgO0lnpNSwv0FF2f>t1?Z0lJ8o-?}NK@LN~h}(+iQ*Q?HCottj}5Q&c!C!Xfwj;Mq=i-&SQpegw}#-_DNm~uq{_c~J#tYkTeCh^YJJA{ty152(CVGVvGZGYjzn;6lmGY~=LD@esoh{LB zeK}iFUXe6h*oTB#uYK9#YF0F9@s~7BMAE2Ij4GJx(ZbY_e_9M!+G}v=N<*yj!a=mS zqqiVUVVk|FF5mPRGta@QaT430Fxz6v?x6Rar9MGFO$-8K*|EEw7Ml9p!XI|tp%k{_ zsBH4^&3%vLY3%=<$b+Il<32SkF=ov}hGLTRikO$RL3;*}$_-YR@Bm{7H88(?z6isB zi{OYd2+#i$A@H(o%&gHqi9@Q+Ve;EdjcFpVw#7qL@2b9U|K%0gP1<_{TlQnI;OF;B zitgdb2EP}77c2E|k9X=SC~Z3aG`UZ5gVfkxISfkTdM}Beu;RCRbe7fD=j7wA_|+il zOAM^YVPWCUANZ_41*4M%`3Hvl!ryoq(6q4*wFSmG6(9B0rC%klYEj zSQiI`Bmu!W@=aUrq_mMP{2)Hno!9&i@1cfBNL9P{tXm5i#-bBdt9j37qC3lINPx6A|tp@Xq@Bl-a74sWwT_t9+4V+qt2AIERcae zF~(LiDD;+>(95 z?J!{zlVe5D-jeBY@tY37I9w*0tFW9!d6N-!^CgLHmhi6)gaEOdo>uf@NFnE&r49B6$F1|w~Y``4ymd^cs`T_M0sWm&(QBP7Tn$5efB zqo?kRE{XrM6E~vpb$=#_1S?=6y~tVgoaR5AR<=6}&E-*e^W5=9I?epx5p)zyo#}8Yg2)xA;0__5DJZeZPYl z4vbdJrJb-;Z}gbx7ux60ggB4spz6XV;rY$dGSNl1oV@LX`6b$dG1Hyf=2w!5U$T{5 zJjd?$6sjLnW!!lKD)pK{=|)>|wPyx{0vp`XE;$M@Pq+-cprN7^i%GKHmn-Z2E=)Dp#V&7xUKYo?6Kv>_YEANSUL zXkUOjOIx~0dmQLA&!WAIWr{>>|5I+i0b>32yMGVo9o{crz8H=Tn?x0yTUwJFE+`b} zTy=MjAfmkw_Al7|ExiH0u$U#L;hldEGgi;UxWjecg8-5gxc-snF*tq#jOLIuo=#N@ z6X0E)5#JcMk!2e-R?+ISfA#mG0}c)tFS?mP>kqmC0MiYhi5{X-&0*2*v6bF`TW4~a zT7mSAxV{(^9fD4_`onwka=lf}@1xpxd*J6SHTe)v*t9jY^utu+3l_BFMvzSILtH*9ux&jM9ehQe*krAm3jQRg5qp=Lo^Smeyo--Okziu8Wc3 zr>)dFeFe0hQhXm_b(Yun5@hY64@_1WW_b3tG)>s=rE)|WRvEqL-8ygRhWC7IcqD4n zMdef&*rNsTekLe>XQFgmSpXA3Mu{1<;&#)-i;Tt!u6L}^Q%K;pR-1GEKJ-)7=^2KL z?&jkWlnM5c-^k-?h0^UNBDivQuBt`oF$$Ix{6+l>mRt_Qbj5BN@n}29i9psH<_@;} z6>f_0$bJ*sVub;jVP;h8li+mjl}w5(M8K}W4PhB*K7`2K&!%tw0jV#^vA%_fX5}M6 z%5h?|M*F!NS6`4cc!*Yqw5ig755d3wegV)EuUW?dq*-YAj$C!ELs`Yoi#EA+v=ujQ zup}wCJ9X{8bR5R$xs{1H$}$jUyugJR4o=2}ljDExYWqb+&i;I==dN!#)<7G;@mG9c7aF;1UzVdFcozkn?fo7COl9F1z|g9-_m{E{G&5(p zL(a8NhSz`S=)w1y6Ofw|Re&NugXqXPGE9f97^}=Fl&**8bV_V^v6O7xFr+>=`GKA% zarBX=r=x$Ezm3~I%fDWpSQ=b{*V3eUSbLx{TugK_qVsgmI~1 zp#=Rf%nPwk*ih#9bIq`96Y5G(bl5j)6oSWsRC|0Z=(AyHnF)}_wi-mY^fW95hS2rBhutf0;+=fA%KxRUNjB@f~=74mH%@SkbVAu{2djzY)HZ z&cd?g=U-o-0Xr;<4C(w=1LVK+Cso2t!9BaA3`s#uO)%~1@p?zJLm8sMOH9UM%AH@1 z0k&~+J*89LzA%*_^$MeT?Y}wT@0R+AM%2>uT2aDd+Rz~R(W1zyKDb8DK>dT~(1xJ& z5T|?_wU=o3BofcDtM$q`EfdOy4cc6}Zt&r-tX`Ipq0vu0FM>c;i_Emy&Iv9(!$6YX zv$GHQwilq7%V}&qw8ibEYAmlZq0v%5(wrZ0CATtqKh(ij%fj8!+Sik)ZPH zAo%*qUhPYLYOv1>P29w~5Vrt)LQp|cRGxAFBi+lknu@`^Rm!`T7@|+w^z|)hEBHS` z%qhd)+o7fb zz>20P1!Eb23%Y}nxgejH+{J%IQ|ow-_6z~MuzFz;}+^TH{>o?gH((U zGs%+14xm}K=s|-yin^bc1Rak-e@VUNiz>`gYG~5r(Dg9$2~(dYE3UQtS{!wpwn$=v zF}xXuxEqln#Of=Btp9xuT8aEgWpL}*EjCu{YL4e~W~u5kp03UP6=bXuj?2ff{Q!2X zHpUqk6{{VQWL%jvG}BP_ie#>6dnOeAY0d0aoHG3r6T^ys&YoY@(qO6OYj?rsVa`Lh{9e-Rc{I~i*VYi# zBtRKH^CH11BCXWf-#6^{GoTm?Bsks9maTV^Z z1fUm?a^b+7m`bK_osP356JpcNw9|~))Xr~st_{MwL~-C-m63ea&5g@H9;OOtbuvuIOng^#HenG=5O?&AbkO0PImZZXMu;tc+$vYcLz zP$K`YSmcWmi2;Q(G7kTyn(gtZt9J78$EYvQ)CE|&ZQU({2)37DIlY}75zKvf*kx3% zxcD`>58T{P0fVkCx*4u=$;ia&?9q#t_CNd**xbLlD=ycZ`UqKSzy;ViP;7XAbE$8S zb&^0iSP-P^ali1IONnRjk2}@j8qJ;+5uUicF*u#j)Ms~xXF1Om67a5a(69Xp6cJ-c z=K-{K!Z*xs3~FUi<9LJ!Vi!*kjQQ*~+U+2Fd(#sAW+y*~JB!INe{MMy2;=~~E1e$H z*N>y+lzaV|3NZmMPdP}MBAlqPQVN+qjFDiBE%U&bCOqL*=#5XM*W_*L0=&zV6idR- zy%?YQbh7I3=GUwVO55&9^tin8ugn>Lk;8@k$`E=o`EVZ*H#Y7jDB zeco$xs=>@hxf{wzi!B+@QalgN=#PAAbjef3f(oG|&$f*gV<(jHIIZDG9O--%^L@@(1ht!Y~@b;(#R3{1O5*mhzzKgl3hBFty0a6E-B=1idyKmGg4n2 z-kaV8%F7yD%0_yE;;cx1J6d{%Cmo3l8*ngHlOxoI44GKQq**K0c^PSziTWQBZ1qQL zqGoG-j*hw^^sLqOFNWj zNg^R6z;$}5v#}9TfWFUwF3pBZOdqGcH1Cs)nuhLf8MadMiC4G5g4MgU9SE6rr`a&$KU-CpQ$EWt2h_<;~E>>>#6O6S^?gb(Kv zSY0i*35d6(dj%&$^&H;^2=-;65)p#I%w0wO0q zVAq}J2v-y2T~7IIwvQT+b)Geq*m<$l5ZmzC#(GP|HpN|j~# zx60*a6fLrrY}|I2&ZC#NH6C!2dk-O)8T0F}j%@>(ZnM)Z05xIEGm^2AZZDQF8~j~x zAoq+(2{AQm*UP%mvp0*Nr_-rS#paTeG9-1y+c?vnROOaGQ{HKSsIfGUJUp%M8awTn zazP>kjg){$&GnZhMSQZnc$gR1raa2T`8{;QlqNr-B9MGE-YZc4nyAbg$s73aFby`0u#X6K1@Ab1)Sj- zw0lDBI{VS6)^pw}@!`M4tT9gP#+kU<^+xd9p?K6V4M=@#OiLM-{ii1ciFjy)HQC`T z)_3UFwVKu~ChmNrj1k0JZ1FIV3Ll&Fz1oq2QGa9Xjs9p93rmUQFPzK-IF?vY_wFoh z`Yt^F4zII$&3$_6G|sEa2I`31SU6GgH!zgZ6EJv@)d{D{ctlxrH#}J-nwONaMGB(N zyA8qK`N;*i1tgBdjxWA0BG1|`vkN!BNlG+YsQ$FN_AqR(pxRL(*xQ|`OnPzI=-g4H z<>=mg7Wp72#?Btiho(`i_rYF=fAOX35pq9542qKJu(%f4HrX6R9^~5rQN1G>dsjkX zRn83=g=b^bP0OS1{G8@*%2_dEEt(mG4?FpnN9B$Niz5@;C~qtr91mBFeK8`Y)jy<= z5g2`R;5S>)T;b}eykq|((MLPMvgy|-J`z)?17EB{!MW~Fu1KXi1M~d%L-;orWflo& z$Q~Iph+!;H^Tn~YDnh%HCc_&P3tI&6-23C64Z zPskxg0^>pi_LdLsEFWI;^2=2XW{THE$ifoSld;g-3*L#68)E5`1%!Xf7G9nYMt*!j z_y-pO4blBJ{8KoZlm*?+E-{OBxQ=bsM?*j?M}n=O>}K56Xc_U*;^56m&-OP`;z11| zbr3s3cIoVWzK!^KmS13=GOW>WmIaf1od3bU!nh#>uYV;5xU1B(qF9CI<0L3vPP6Y# z64I34=lpn3OYA=g{RZhbBgyz}qYBc>^s7j7#n6n6Os0b%dpzSm~!jRDKuA* zGS{>G4^xI9T3e9-9A-jHQ~ya6N?1XOI__f3 z*INDSy8@@Hb*UBk=7I!GX2qRcE1GyH)p&I)X4Rok8A_qo-Lj_>QZV(1 zpdR;B#C5z78Rhm8C~fQYN09GGp{VpT4e=6v-7qPrwbW1R8wrc>yDu&hMr$;eaw`Nr zc78=_Dzm!va9U)Y`0Zv;6Ds(A?yEkhA^5*>`hJs=sCl4p8S+T5p@0`PoOnDFVYdD+ z5f#khbQud9qt}iou;Z2ymC)C;Y;|x=ZK=FN=tq;@+4jDkp<_0QTAVBG-+#asE+o_O zTX8s2#vC=%_$s6vz1E=F+6W&wH(q5A2Kd`-hvPqnT&Byz(N}DBrRgskB`1G>Cvv)S z=xXXjo}7N|@vm}x9IUEgS6-Es)M*D;F-)kAcMw&}|DN`tSgZn&QCV@CFdan#L!&KV z(he;>h#pWJ`zEDt0z{lCt0TRsE(>N4@w&=_(TUg)33PpNrl;$Arq7#n(~du5VG!|3 zGQAKAx~^9zG6nr-AC5?*=ID9=$0;;$&Q^bA3jZkdlIY}Y$Nq@(y5I37G5l-4+3H5p zJ1A2yyg>uv{Or}$wMh@?8hSy26}412MmU_+o2g8!M4x+1BetSM`+-!`2HvHD@;&q7^?t`n`PS#lP9t%NemWwrDDe z&a^Z!RgnXy5sZr=&AJr;Lr(}cw#^@sQFagA-{Gklo@brOTN@LXiaMa{2nkiWu^sJW z$*9_mS{E)(GdJ?&lm{oXf3Y+;V0z9rj9-)}DqpRCLlJz#%&tbF{l~q0V@DwAml;FK z>lAOsw_PIV%?`1FCBdYCi4OlC4nhp=eWKAl5N zciz{l7;fmOTh3~bDTW7l7IpJ^R43Ff@&1YJBAB71@b46GSso7Fw%@Ev;Vl^*g#1DN zG?fL2E1r`n$w;t>BKj{F>wiw>vmu;|R-^FRJ~Pi0RkM$F9dEKfZrQIv=Qp~=qtQLm zQ~Fi*2Djzu&T}25r45;iPp1NfRr;e$ z*a7F$QBGHgclW9Hkxb&meZV5idIjJH1HHDsH5fT<_A_L{@!H_pY17zIAQ-y{ljgr8 z+|KsK5Ep&77Qp@?JU7&P&Z%k|_Vq?)=2WN>k62&_R-%!+xj_ z&Lr*HG&uXx3s)?F!`-@tv|N+s^SHeA%+HJk8w`#W+&k@)MOUu*6y_2+=fS>)d~+fX zO_~M`rwNOa!b2e|-z?&Y%8S~!G=3d-CEEMVJKcq;bd;4i#n!p|88YfmPw6Vh&eW5< z_g=3223M2Zrh9Yd_1gAzWn#E+WSx)LC|q_TKmaArRpv+r1y%8XNBF;9-YrP~YrZ=G zQMx}&Eki!*L>W-t_N#Hf5OKk!&mOD|&=#V?K9E=IpMI40fF3a(?r$j$(3?Gr=R|4^ zn)L6T87O67c7frCCd4fp19H64@|B|0Nq+fo7U z^3Gwek5kDu0z8Mn~qy-#fA2_6O(ke;x%>g`XH-?g1HVD!!SB?>>YxR&~7v05lse zFyQzO5ShpP=NP=(UrkY?+0*rALg(8~tu$E>vwTIiOZ#pML*7D#v>z#ABJuv^_WF6D z`9;atbZ-;j^dp3Hsz*1dK3lWMcYcYRkfEn<4*rOFO~#|Trzz(Oe2)Iae0#m~29g;z zw>g-vyDCO5f?dk?mmh6SsQ*;|Q0z(D3m+CG(5J=`&BcRX0VfOIZVOrp%o=&y7N(Z8 z5;LpSV1*gJy`Nz6e(eGZ<{ukjqs_kJvEnabTGT9IAIr45P%_TC;zOC|q_OWMiW09X zTQPus$#-6w#+kE<5PX?hKKIAhK{5O|hjPVJpNk6V6)VFwk7VCx3RxjP)bY|w*R2oK z0V?ujo&vRsok;j_OiRZ@V7AW{<%qjcfkA`XLXJ2sT2ccp%-ohk*G_OG+>;I+GiV_o z?=glb><^z&8WTz7d{Yu+l5#BB64Gq=XlX~wnQ5L`_6A2eQYM*ywedrae;r!V(o|K6 zRM6s~$B?*!B5Z8skOXnz#BgV}{yI}vtA3OVD?9XNy5E1q2>+O=c<^|+b>AwaRecz#>cBCgC0;mar^*9< zD}2m1E}`Or!mS#!4jQXKrBz?~F&2krZRL7B$}kEfurO>;TjN%opI#Mk&_0`cXq7xO zUE5Wyl^XuQg39{yKRPA)-U}T|EI()1HX9{C1NtD3ptJBcAil$*(6=CN-=m~ z<1e!H$e=_Qe~yH_BAJ59qU_QQOC8ysjnZn-6WYM;U#S;`7)qLyOB-4qInl<^{)R7CA{(^AS$rT_9b=1D!nT<6ysy{-22gq5czy;F;Z0lVM{wSR`a zYN;;tEslZQtH*%>4l%fC?sL|mG_|&gTV6cRD*KY_Z+?i1om3bJ7#q%&CgPwqa0Z*L z+_^82ECFgAz9bpJ5FbiB9$7=dD1@KpU~0T1FCD>2vARidxe!GdK5Co9YtI)P0Cy$; zrmAby$fBE>@g69b*R=cTw7G+}LuXHots(Cd?~SXK^*f-you_C`G}N#IE9vR~!Amz- zlfJ?dA0baP3SzI|gei87*_YmA!hmntNFQH)uyoxob(3}~ZWa5xfAu<#WG6*6X*)#^ zpLw#Y@mu#1u3jcb0>3KP242_Wh2rDA38`c>X^hp&-<(FuCWqSu4Bg)H_c`Q@#RR18 zn}4|shnav2d=kh1FW_3gcwH%|ZNG??Vs&0H7pxl~;nw~(9S_QMnwsTtpVo63(_+T~ z=&Cs^04lQW_`ctYgRWLH6>3Nm;-=v7VYZ9q9qX)hAFY*wjMT#hEsleS%wcVM*WjmDRr_*mmm*#kLTrv!Op-^F*RX$WM=e|iexaM;M#P&jJJ zY(XMS*Fr6+m}mxnR>u1B*%Ze z)R$niaG6775rN=dPe zK?@v5(|lgH4P_Dsh4gVl2d0My?BdG%D;gTX%7cDb*q21q?j6?ip|El576sK2{9`O7BE%0#*vZ4j^$-s<+|57oa%5ij{^rL8Awigy0AzJ+X=*M{z*A&>RUVw}FX&sRvtr3%?+Cysnvlsyr zj8aum3Qg9eWzfL~*`~SA{v<$7HrGK&lVgK`SQ}r67ix2AgZ=3*pZ&zsvipzjk$T(F*lQJQgdW`#uLJ zCyZKQ#zq_y$4+T7id+~r3NkZ=yl7hUM)PeGab(*`(t>}auUq$b(RZ{iuH5Ke&P^6j zo9G*yLa^OF8#`&YtKjWDJzG}yG`9t2gKLJwJH5ipOgM(|LHu3=srJxV<49?8lr(P@ z#{AS!W%a+o40H*T+Q^KMIqk?*WJavM@&zV|cXJC&sd`_dcc3nGZvA=wpW2)=RJ%AGCS+GtdBTo^YiSGYL{5| z|JAV@Qr+C^KT={g?YVyC(BVrs@WiFm!o$4eO%>3g<3T=#8+y#Z`=3+h(kC}>SAOi7 z_SFy1=Xsh?mc@ntB;iofZzlN_7kZhE47(u#><`}dhSQhndSr~(^!-AHxlWhX%Z_cm zs!HONR~|U(oGygN%798^oaA}{mM$RkqG5mabbZ{x5^b;=HcotHh2tgAj`{r@6TnNu zb2WKlV!PggAOXIfD}TXzFO}tAp1@+wBGc^?(*eV;>x-n;H6GZOC-f&fPHFRPjL7)< zYuI-KqqdH8;1xlhpBgv-PB#Eq)mh_f3y`C4saRLH91f|nd#zH-nSi~68CW6m^&h9o z6(!#A%Tyh)4s1=J5SX4ZK!-I6hKIcCB7B}|v8*r2tmotn>lV#O7zW0Ss78OgWri2f zT&9GXHwAtlH||RZ8pJppney-#;P?+`;sqCz@I=^)ln(FWpkTVsDthO!d`so!`Ux7= zULT9>%KF_*&`KPF#t$$va;u?`t#D3HgyRyth6>mVGY+G-->_QKfr>gR<^J3V4^W#19545wgVZC~N)p>SbujA0tPkU1X0f zxDf@z3<23oL->dadoIBEGS5pl8{w_#0A*Nx8!3B(H3(;-oM%qhK3jeQTS3nB&sf2C zyXuANwC4Go@X~XqG#qNbzFmyC=min4HBJVnIVm8?H2J_(v zt?Me-IMQ>hWp*$`A60!2+tYa`h8tLJ7XP<%*0_TON$m^4#u`)0zqNAbw%!)I22~s0 zV}E=-Avj)w>=GG9r3oEvehh$s?ifuim`k6)`|pc<^GUC@+gM7soX!2eMdkf5 zQrQAmz6O|pkzn(r$hEiXK2E=Tu1f?G4bc3vK3V)-c;-H27qarWDd>1zm#A3Xy5U7i z(eVj!UHaMa-%1xM;;npy{Jqpuz9WrGGL~K3|7F~8KPvEc$1F9qF5d11qo(75QMiYu z%$GqD=mZRR-XkR$i}CHPSj7pP{z32Eb7-mF!c)OxNymYdZhqe|ntF_1#so;{4WLmV zqLjbDh-cc9%#C9hiYX+}`+dffmO?Ok&U=H--tZ301G->j>RqNDD1;xOJk7eQasaF-0Nta8WIBU9J zf-77ZKUjk!`9r0PygcL1#c)v}QKfn?s57NAi8UPpLJ($D;3=b3H=rDA(sz^Pr}nUJ=`&O$Qx;hb!ORw=ljUOTGQ-sFlKHEHtc7~qa2S?9jD+1YL}tnXAD@O zXOPfyBWB}Pf@T)$tGt3tJa_l#{J?s06&EZeQ1}7k077qg8eM)iL>sKDU5LaZ{TLOKrq zCM=UV!x-aTWBtXUmHpzULiXjrRSshc1*ag~m(_E-Ya&+TrpulGvn|8e9B*g3k8WSt zM@~o)d!Z=4J{IcOecS%GaoZ^VfEqSYm7a+hQ<(g9x2|l1u47U4ydTgzQy+9itlCQd zcYoq4=P73L$hsOVJGuR^T@6<(K=+F;fIP-79#~;z2(vx~aWhWo=#~BME7bD|6`Eg( zSrE!9OBgU1h)y8|?`)ERdak*90Z~7${uXAi?NI3Qo_OY6+Id!RJOFAKHt7XOVL=~h zunOh8D*`KP{LKYn8LfF)Xz3k?I#}Z>Y=2K*^kgY|*Oh%Z%3|B5TnHefy+@>}o0mkD zKdyyckS{7>qG7zZckLCWX}uaGzBfp7;34k~I3^@89297j`I(oY*;Gl)g%K~2;*1QJR(o6l z!=)kR(@W-YlmGLXlazhy!-oeE`{&|9fl{(}JH+m@f)Aj8#&Rv{LA(JI^Ts{xnc7ZG z{#|pzME`f6SM2FaYA4ZLS_)-d^#HrhJ4_(b^UIjfdF&5}YO~-r_#(8U-{4Tn?k77f zi3e=7(2IOQq1g@9eqlv`h&0xn%Z0c6t}&)*S8w-VJvwzg`nxUPDbm(WV(`Q|Z}8ot_P0X~4o zoe!P8jw-$Qem95<>y=Y*8R?3`$5h+dUL-Y^eGD#6Kx}Bqiw_I{-iqG~Hx`+>v`X$= zbx97H*#RoIRq+*)ThG5%k=F~FG-0D#bHq&&!9PPXLjvgLPM0e)GwYi>Ut_}M{Agqi zy1x!d9kPW|>_3xc`YhMqpf zLATQhnJq&XO(nIx;cgU~HV9P+Zlhch-rLz)eb)cHtgfQOQgZ!gHGskTr`h3xY+s9C zdC?t-Aof4eGrE%lTMu&|3HAlG{PzEX0|j35^Bv2z8?ZPz|9+ZC`uB(@1N0597@EBR zk>4PF^ZZoWx~(moiv>U1fFD!F-JNeqgMY;E!+G34|G3GM?uGZaCueoLdFQQwqed;! zrs}ZL#N7I%S5LP+7mEWeSADrDQ@U7`vMV8DPFrl@*}dbiKQHJC{&AoZOpe)rq?(wr zDF;gNeL9t!m5i7T0L=BUaEK?!f;2+USJH}#y#r+J;dosv>i=YxhN`4bA4icsz8t)r zjp0nVSh_2JzC$a_PaW917xoTp(({sR(Cpu1751iQw#3rs7+Uwfj{}9R+Hzk5+#P;1 z`3J5nRj3}_q;n+?J352N9yq4Bb}t}=cnUER}vXp|qg zxWYxoJBH39@8E7_1O&wLKrFZ+p0?Cw5oxS*(nc<67c^ELMdJA zU~^O>i!L|6z1n_&9GjIiBZ6AK)$^jgD4OT~6Z z{l9P3cmbB-PcocnZZ8mUFLpXjm}+LXqSa*Qjon!=WP|}8U;IK*rx;_DC=t9umO$UK@LdvK1)UBY381tbsXkPLg}F3H~4;I%k z?&bl=#WzZ#q#LQ@LJ@f0#K_h0KMxRQnZ4^PU^1+lhrHJnO*@VL+{E%)fkug7_La4Q z0KlnPoF(#KI`Fsui(524VP;}N{x5IQ^hD%v^#^|3#3oek_4~jHt{YrJXZ;#cCUMe+3*(^ehEJ6=rqB@yzKEhi$i1FWWNRlKH~3LIwg5mUd)Q4)Sx z)Lu2?;t_~xaf|>Sui0~gG?fo-R#s)CUQciUT1o079)P|it|AnY33KB3o|rj*LY1gaN1yb-|gWuW6;Qx2yho*ABwN&DcJYLTkS`K zZf7@uuWAJnC{b8b05xe$+>?7UdXNndg}dP#1+z7M6J#8~41X+WanRNUU2c_2lk@*p zWM3vVtBsSfc^hsn?Bm6>o74;u-7V2MMf^JSwsB^o2uhH3H3m>gx7g5Qm3VRA*I&}) zHKFjLl) zdwCP}1bI0oz|M;Pb}_)>9lJvz1%;PrE;0<4)(||KT{^N{C%^q5EBlrAM|7UM!J@$^ zsSTR&29)X#j;FUA)twNX(RmZL(BSA}1b-2DO;EBm7tu|zkk58aF^Hd=g%u|2fbC*Y zopfi1S{CgdNR1o%qTf;F4OG<1gZ|`y#B-^!!|WP4h1}jkMqMK<Z4`1SnW&2};E%U{~c7%7a2 z^h1iL>RPI`hB25RqorRM%Zm_?vCg`MANR=lD`0A<($FvwokjwwBAzAom`}gMz%DTGK4h49=^sxkD#4g4)%+DsTz>6T~``V+O-}4LWe97^9K(F|eiC zjz1;jSz4ZL)*^aWH=q+8j@dV7Ri_V~bDR93xLTJlar*>h#&&=mq;gXF2;e~o|1WQ{ z)N7x@$btf#$*i_sYeMkywFX9mx8uAUr|RhcEhc3+r-&N>up3|Wc&z8ww#&C2QzkD& zKjl*N`po}BS?Q6^x37i)4}|rB63n52?JJPiTk|R^rQhQlQja19+;o~jzFM_!gt4Zs zmp8k~w&b~@NZVL~&p6TPe;#N$15YMFM1PXyCjD8mu7JnEbhQzm6Ga)hFVE>Ro4OrW zKJyu5`)n8Q7!p3#xS}8f4pet(M>zmZ)<*_g(!l0a+n5;rq z$W$+)WvapC6>~EIeCb$ClMa_|vRVoj*knor9ND$Iul!$p!>vbZw*{CU*T8c`ZoW!N zX9v%xyu}q`fo|&=#Sedrmq{goh~($fAIv3qgLnvz__#!{+%`5y!u6i73f61QYYXLT z1Ch)A{sV5Ge+@)^9nfLl$E)am1If+nRR3iF(}&uXjq1>WmF2qbg2o)-1Uj7>8O|~ZO))Rg??H6^%eSg6W{N2=&i#v#<;pie9L^8mW8k@*Mtw* zY|JW5P3TzAr|myR${xck24gIvrKiEL^(j=p8+OySbQ?Ct_8_;1pLw0MxAmJPmHGy+ zD*ekWwU=TPEJfKWG?C>ku25))_Re8heA|rPOJaVAfR<6=rKJ4fBI;2u`* zQGzMCG?OU9xUI16Pd6o6Vp3e-9b(zuJ;Uu9rf9)d+~Dio`FO0=toM=hlxEp9r^Rxs zHDZK00}s8`zA0krQxgY!bwDi){&n2=15yqD-RT~q$n64bIJEYQfiV}(q4t*&jRw)X z(ON?brsaIm8fg;~VpCySyinJ5qK)nsJoN9GT)48tKs-N_5bByu{hy?yhJ|Cd?p=Gz zNztlkYz)oWoN8Q#ABfPQgF+`OgnHO8`dm>5L%sv;z%TxTP6a>tw|Yi+!dMlDJ(R5@ z$M&v(;f#K3v?!?vyjL82VblpC>@}b8-OCfy$>;Gf2GzsH#BnE_8E?Da}L-_Bjop0F>ceHMAfRYX?5l^njd{PnS|N``0csL_wh%v)9iXOLtSq>*E=*`}8 zhl#;-yOH7Q+AS(_X4K(m`*1-QC@S z1t$>P26q|U-JReLgS)%rPR=>s|5x3Y`!+8#RkOQy_v+QFdt*$ye5?pi2PwkW9UltQ z{vrI(4Et;Ua}W}a6uGmf19V-j)j4KjBbaC0eGW~L5hjOFMc(MC}K9h(Il}~+moWh>tSZz8jY>~5M-axg!^3AZu1pgjTXjg zT%4SpfIPMng#DtjVQIln@cm!g^NZ`^>#3!!4br&-D09#=gbQ%Ild)@-VWaY7M}d2} zYqt4{Ryo8QIGBK8JPMkS>84J|y~;f)Hnqvk)K&0+Ov-BfmPk72S0R20DB+F=BLbqgHi( zGwEo$_B4(I#b)EjtA?PAp4jEsk2y*CL7bcPu$o1oXoF-JWlg zi+NbuP9$xrMh_~YZ}QZuN<`&v>3Z@*;*K-rJc`Ro&Xj%q!~)Z94CP)JJ~1EvqF+LT1}m;S^t=rS zQeVmx?Y`q_QkESanuCzzl?!|{JjHQ7?ob`9U#GtO38wqSkx)zoA=w&lZPny2;uj{k z{^@OKbh6;Sabo!tH@dK|^M`-VH`-FIZNVnvbyU`v=k?%yqv2%_tK+tAY}u>X`f;C4 zb#ipa6H$5VCGWvvUEv0vce`q4{5brKyHtO;Ii>%fyFnNQ83g(A+0J_Ox!CoSJcP3t1%ZMs+WFlJvHWNv3uM|e@W)m2m0D}B6 zdU;ym;TSV})DgSF>abE%&RkpbXerV5;k*zttsj{;%6vOX!Nfdnc?Os~KLI003OiOh zHhAy@bL4=QF?&iatw@S<-M6JS|EDXZzC8 zOHx<(Xw23d>Sf}W37iz))nvV!W;=AJ_*zgtX6abnI<}hH=Ug8hw@3uy=H}J52-Mj_ z-TckY(gJFj!R4thn5zvV6e&X>(_8-OrA`~5r&e3Rm^UK4!Od{?ohnld>VOADunVtS z8Y9H|mX?%vi@rpk&%=%Xz7?gVeee->M`FdGe7P9A25$sP-E?y3ssQ2);i)EnjluRG zlEf~7_*%}HUAF3O(jS5KMqsZBlaoP;?FAP_$el_+x=;!pN?fV(wvs@8n>SB;B`%wM zAcZc9EwGV@c)n_&We$KC+Bu-rG5Ae9EV>I$t2Z{2O|$v;8DLMnTmWJ_iVKZ2q0G?d z7E)L$OZtzzWtJD}l;>#LK zCvKJ;{^)>xqDj8GRU=`7n6_6g4h1*WZ83+>e}&5K_sxu&g;VK;i%yz zksENA>~qY9`uWn_K~>#U!e3LZQRl=Z zhC92RHkp?k`6h{K9*A2MRlR-j(UZXm3;o((P*+jVa(IPW0!ATx1(4Kmy4p}U9xAwJ z+nppTYz#AFldL}as<dQH2jPZCD6~pBJQ`s)u$uW+1;b@*jN3LrH9s2+6D9-up<}J!0@AYdA8@U3BV45D z@D?a?#g#{iA83(s%${>|lF=6Vz+K%9^thgmdplTBj@n%g{Wl!8{Q|5G$1jpLH`PD0 zZ)LK3#8uU3pQM5yUlYH{cdXbW5}RbYEKjm|jRntg)63y#7$zgg$iQ?T zRw4(DktDGFYqwJi`)3gcx2ga(VC7`VJp01Y-~Gtg?YZRaqd%u>cgxx8{aXmT12G}j zB7>hB^-9#Mnh#-ai79mN1tiI%?pyptf?71l#6iu|i@)d2O*e#P0*wIlmuN3IUv9D8 z^R`ZtKjunh=*YxGY!UeFoh$JfpVztZ7l2ZxR6R;2C6mX+L^579Qucc#1e1-m0;3!H zoF?%g`rt;|LmY}_+1$^p*FYtyDS@!0zF3x$u#L2$5&OK_cAOY6|4@vKwR(#!-rs3) zUA+@^mbjEq1{eO*+;p43j;|HS4Iff84eDP|roRC1O1g*rG(V=;P_ym5QP9tgG?~iN zEq)U$B^e~Z1ls(v7+95!+<=Z9`!sme6CI((aEeNQdeZ6#Vt0ED?@C7b=+{V8c$?rBCT<%e?WYnn&l=*nj3V# zO=f<&!?E((cMZ`Bgv0UJ3xB?t@Yhr3rDaVz1AB-CnL`DIG9s2_o%k}5Jx%l#hy@~?xZDlMndYyFRPA}u-R*42>NS(+OGdM`a0W;P)*idk2sI;o9@9TS3`BQI$ z4F1FQt*5sO@lVtK(g%os_Jqu)F7!z|r#VuqA6e{s^ncx0-|l%TzNWHl_I7>p@yIIi z&Xo6W|0qx19s(*_JJ6f&(0nb5}v4!&})j1FQdl?9anrK9#!FAs^!zt;Obxu@jhG8tod& z(WU8mEXltrf&WTSE%2a4JhSGqq~0R0x?b=8nhFk)@FWmvK+co8C#`MD>kUf;@EXh= z$^T-{a(>%QX@2hb<=PIy9a8(3rLmgsF4ps&*QCtCGQTa%yb`81bwruc&i`4RwRTmX zcp~PvYO4_?)8i)E-AyD=5ymm~Vcl+3crO*`<|lk~NLHRx`N7pEnIQe>`itz=z=sdc z;R%vaB&6_Ey-DTbVW#V@3!-rSV03Y`ikN;C8)6!ueDd6}k!iF|WP?sQiLgrM%r5TM zbxU)H?Fzk<0cPSN!pJp!#{cZ^3WNF(xZo*x9$ol-Up8XYtkKk zW7N(KHtt4HVz=x4UFsRvGu8XeB{XsMZz9k0>S(U!f@by$~{pT+P}XsegN>nqR-I5<&6*F8*zWz1GQE52jDz;UfEGV$ABf z9FPbCiqcy$!hoCX3+JlmV)7&fFWMaya0Gt}^lLTLh9Tn(9#-sJgDXAjDOa08AuPpf zLVtz+{PpEP8*xD=4`$wM7QAHS?A@6O~!mucKI%3r9FkvTAgQg^r%|}hq zyf+frMCxkj$LzGJ_#|#%9~S|DdsO7R#m;E# zvbUea$Le;h6m0yKSC}Ni8Q5QRVCtOD!*20}T0ltSQDsd1PuB#RK}Kj1-hM{6cohN7 z$V2DBMdZ(%dy~&xDwk6cDypc=5?L!DKVW&UuGbjpe7su`PEScKwyj^Em`5Uj$cd<* zdQaZR_PNK3Fsp?9V>)X7o3M+{*z9b}V?~2(L7zGjb8jT+kdfK&97sM41CkeLk& zH)^l09sEW^=K6zXstZS8OUZ})-tawYV`IbJl6)XWsB5WE97j@xrogjTbpQm2z%oQw zw{^{r8W+9WA%r5WtB?#pCJEaDlX&w_q7mSh$+t7VB70brP+~56Eb8cM_YRY3&zfNH zH=63}e=qJkY7_iQBm<%u6wpo}#K;{lrk;Dcxqo7MjVFh9B3*Y|4~41G+P`ai`Jw8r zzmN@rHPgwK9Ub8Z_g)ZD;_YW_c3<;p8)Hh?T_Er=#N2*d%sQ@p_a(L1Qf?8o)4l!g ze8N|wlB_pd=+_DsmIMfEc_o~;mY20RYCXM)Dn*Td(3)1s(>sZw_dz%#M{=3SR6oPP`-eaU#D2!S$~n@@>^;G=K4PL(@OA ziNWdMMbe?{8o+>#mwAur*ied<5)(_7twWcB`>351;=aPC0W#cxvYFmuSg=qubSWrn zFLKTRb{{b@vxixMl$iPQEaSqMuh!W>@w8bj=(q2~#Nrncp@C499~z!-<~b;bET9x( zT;iD0AWgOS4G?nAH?n#B%-OpRzx{Y-DBmP9zuQ$dYz-U|+p#l%PWr%Z)+5L52R2WirlhGL?J4+c!Y#-8ZZ@a%y)thVdO%Qb6+Y+i@H%GyangM zO!(20GD@#1M-?Uur-lY&GE~9gD`ct~xp81ic+p``=5x0%(wXrZqGW81*J9}z3s(-+ z;2ahw;V>}=0z~2@sLYYVQ3j6fCw#LZDbxM~TjjnM$F^7)BQ2$`Id!Df(2ID zpEGRUT>)L4o~8ZF1vCD?nF<=YYDYfG$oY_`6Q|a>+h(KUEf|VtdSgkMKc?)-RTa7) z;s+9&v)ii-?Df}MAey*RgEa}2=l&PU58Z^FBZv%Vz*drDyH`@5+sj_P0%b^(JpV}8 zbnXK^fz#%+<271-=B>R@24M;8 z{<2rFO-7Q019+GN%T2NG_w8^e8xg*r?o`(3;?bT*6ux`w;0R!~CKycKB}4^EGvD82 z!VN^^nGZ9cmCBNc4N$v5@{#)M?C!b^8>u$UNetd11B~3D4L7^pFL926?KZWUtMh~I zo^Ld_OU<16I+mQ()osCqfOjOQ42UF^;!h0CjTOUqn%CjiHpGqs#CM);ixAh%&f^ur z$fZbm(i3iDJlPDRykb=TAvZoaElq7@i|frkQe=k*2K;A3G3vuBwQMfq1YSfDL-30% zkEK$%m}5;gVWvKyx*Zvj?QQS485Ortul?xQib^d494kbO8S5+#C%xJK zGf3*nb6+7NvqQ_Draal9+qynjpR;p};s}~JcgF7x%~?Fw<)l%uH0t5H<%I`fg3p|RWEeidIIdcUi1U@fF$cidnnYB)^J%42IE zgqhr{Tdqt6Vns!P!$SQH1*T3?9H`WJbObKxHd7j7qri&r+k`Yss(traPo{lW`+cYT z7x2;eo~_v1L3GAsCWxVCy#wt{a>DUFU5RtML?YTS#xqU4PZ-g~ddk7{ZsvaT=&nNU zz{!NwK#&FJ(GXAK&FxYfHI-KA4hirg88=2y_*ZoT57k8?>vaF zIX27#|L3atb(S_A>fE(%`;qn3pE~Ylc;nI==L0ozIH8Up}6+9cMl4hG>|>;qXpAze^`?`S@0GDUM$t zzVLM!Q%UEtom1JX%N=`InBr>o?>_$z2>sS}Jxy|atrLx;z7Il1?;qY_?s`y4TmoJt zUN#0x5!&`Dj845de*{SeiR783!FQy*mDl7P2@1}~!ZB*+k07rg9-lAoF9-2?42%)z zt*~**_|5VQeT|gV$w0N1g1^c^D?VwQ=bB_)m)Nq^@3-nhrV>Nj^Q-C@3jqfpzCyZ1 zydo#q;P#v6RfSEh{rcRN{#dkAF|O%d=hlay!UFq_wSkmTN*rfDEs2izyPj@1D@*A5 z>TjWmmojZ;j^@mLOCCuK&ot|fN_UspiLaO!q*S^ao8Zl8ZbRVeW?pFS$BLTp#X~3U z6mXxJnI@4K(8qSs+yq8pnUWmaquKU$#j(FA8n_~-H0I=cguP8O4FU#12*^>|WdL#N z@H@@DN#n=Ls;UuC7Ud6g*R&iU4h5M8E+UyFEq2Ae>1*_U$Dp3q3 zpt(niGGd2#gi14dM}H<3)!pvornT>*1Fg$@2Q?c*MeBm21fy=+d{wj5w-Kgs!1E6e zR|p>OnMZXSLheKE-S@6Jvwwx1Tj@7CnORfZ_SaB_f9M613FYUKFDVs$=-(rQ2fL|Y z6i{Onx5^XbtZb|1A6zfF=V(zf&xw4d9w_KC(OlW3x zzD->7IFU(ZcmDEWGG~&yqPKfYEY`aM2|o#)A%_sc24LhMeE)CQ-pSp2fA_TN`Bq|G zueI)mZofjea8bFieQ@lyqqEm+CzQ`|O>lgC&BM*juwEEKC@X9)h3~KmPvfpv`|5*f z>vrVmd*-xZtK%)W;k}xDoo1sm+B|8e)BDKs$w`=K-}#2SX~k)$jpuB-S)`_dv#MpC z-Y_+@Sl#pSu}wHk{IdAiCDlXg5IKaC6kBK-obt=QT2Fg?N+Ij}ziOhiRM+e@I<@sY z^79>kwd@?YJc3?+328Km=^);IHs!JjqUb+?$YA$yK3eQ~2Lq=SpWXGx6w^}6(V-@^ zhC%pXk*hxC(d{jXN(@N%sH5HK>Rfod?4eR%Lg7ygsd=a7GzW(j$QW43 ze2_z1iEWG=#AjOb-dlaITjq#SuUh<%q9u@yGxC;&b&eEna2&3|ny5WReLadOUV0_d2@d`9%+nw?U_C#rKAYTQgqDXuw@7SVfm3qo*X%cAx z#!ag-oriK7g3IcKxoWp7LjF0Y;(GA{Qd9BZz!_N8FLn-S8$5^WTru< zs@~f9BvBWSH+4E-N^aMkh9|JkgG>Wbalu22;AV(<6FM-3c7#NsH0zKsJQu@6hw&fl zSRWr?LAi5oxsyqIs%&KGoQL%H25r&rd- z@oyb7v|hjA+Z;(_+%xa$8t*$VnV3teCiln*d~Mf+W-u;583&%=LaMsLuh4CfB&Qx69U zxo8P4o*Dvvt~29%*Jd58u4~4wWRknxE*y?1GSRqlC_;a4e1=1pd)Si*E&X#=Lp8VG z{^M%;ZMGws6jNMod3euCizlps?+8kuqe9vJdPTaCHb>ax{@DfwdYE&1nL^e|&~Su^ zZZ(hf{=SouW&AmntAk@E z?Yj08QAfj7%c^B~+jyjOqqTh>(Gqx^k-x?^{r+Q#lzw*O{r4$%PhZcLVqV!9V;+C~F!V(he8({>bnRd4e=R|lcM%+{Lm2<#iT(x0G|Z3H_8%)mCxaU? z%-do%fdY}5k!AC4${uKGAakhO4}f?0qm>l5AbY$vC-hs9$)>N!m8)mhEo@SJBCV>+ zCYwRE+(q0c39s7dTk89w)09}<(H`}7u}FEXk0+(e$WysZbN1b<%%mRHrtj6cR;Isb zO!y@ZDpsF_O&+OJV=7MnA#V1MOX-8L+r(<-+Qvok*t@W;qsRhI1qKb^8qW`5sq$Ht z%V|ubYXMOJbarR)CQM>ryS?@>AR6y9)|2%Y$l$)Zv8}9dMO?v#k>OYbU(UeDfs0YW z$=>-jsqB}B?i(t(iAKoXWAuYlB(6MMKhn>&0*snmF-J2o@Jmcw-QxWj74**6^0Z*&uaM znt3!eFvkCf)ut-mi_*c_{Ds+|>!qeq-8Uk+>`NrT)r)04$Un$&a~<1RlP4 ziM(|f0PwgzazwV8+7tJ_$B(P~7IX0DmlYKXqXOGCV%O^g5Je#+Bg}|iR|I`=@{c(DvQ#E!ogLaT3{+#Ti4)46;? zEb8h1*ymahUrA5_=RZLT)Jk0-RnZ`bnCIdxs2;ZcpKZ>U4?b%J>Zt!O35*gQby&l5 zlt9gys;b{ufh(N}`?PG8)c`TvPZH4$jy_(vEBoU-(Moc^;hf2s4z<@^==F4iiSQS{ z7SQ-F)tZfu1YVFO2UXO`Uu5^4>2Cu9s((-2xP0&I{sM-TWB{MJ(fBVcRI}Xlmc0zf z7z%Hh)&ynC^*j3uDB779yG@Blu5DAaNV+Ur(R;rpOYMe}fuO2;;wGl*#cz}Dls!xD zD9JDkR(o%Ee^LgfWT&z)n|{UdZ>{yR=T z4w%LpnO!qImgGerJ`WY7J5uf7eG%V${DWqQ=M?z3TiYh{3z*$N7 zdM+*jBJzQCS+kU~PQ$v#I6Sb{N%IeGw$Ib#<=FOpiPIrme_53fN*1^5u)EWsR=vTK zT^)lk%45O|_8|XZkfl-JU1DVQZb>J|c3KD9T3%62otIhno*$Lk&&$z^$OK^a!)QPi zvmqQ5%RbO-V9ZyWQ^_;N;A+(Qt3Tz7=y45p9F{JJHjQm>{xA6m|A>`6(A=<68%!H` zB5WV8F=rlImxIwiL*no|BFh-bd?2SgQ_rGNIE);kE%nf?Q%_j*WgJptvTgll>f>CB z_PHnmOikpgIgUY6^inS$^W7?KJ08NkTiE+njit+=aCB5who0ev-fCVQI(1ptBHubj zKf{t__O$oK_thG~2bsUMaB<9-pby**{QwuBGp)@2OcQ1AZs6;MmWAb&Z)N{Um(a@) zc%m-abMABVwnD&dCe-1-%Lt;+n)$wux3I(dGcbqa~jAB z)VqA3=gy=5T{lmif*5CNVSbh*WTo=&=OkvmIMrP#l2_lhw@GugRF@x+MEt*)r0Mf< zp|lwK@kF5T)XZL~naoQ<%uz^SR)gOS-I1Oj4k@wv{;E=i0&j#{dUUD2US>l_I#PuA z7n4JlWd(74jiDfRS_KlmKY^OPSL{@ZqJ`vsVbPK;LheQ%r-zE+%Q!?RZR1!HS6;>1 z+Ik{2k{XT6c-vUxxdY+Bqqt>|b9u*YLF`oq=U|I*S)Xr>)AIp}V#o4&^mkNydtqlxk5(Mp<|9`hurMM!o zJep{_s7Z%3K)Hhd?{kKB#{W-gW}>C2);@eAwm*lAY6gE(&+RJpH%t3(K*)8cbW@Z}vtgf4Yx{n=*>7U@Kx#1p|n9AN{cE@>|2S-J`h8D0v;2Jb2el z3+sEC{upCdqvWD8Ujn^CdocJ4E8t!!(3E8ls|g;BFqDawgU?)3AKv z^HB!jrFx1=u#azW%)&(WK{p5zT=0jm{$JStS2`G^{S%QvML#XeM?)Be=VbnAmZZ^Y z(|`J}9sj9*bPrz~@yb}e($x*v7(rr}20}+(V?kmt|3Pnq22)zG%78dzc&Jr~aEiX@ z=@zL|7()Y(wN*jq`I*Sp=%k2T@vo8@_ijjteo5P*4<7hsVI!!jQXogz?0aGE8b;St zf+bE7Zvqq@3Ze5W4ex^)$S!n~23U}M!-1$wOF3TGLi9K%{T^!J= zP8(4KDT4TL_z4Vu>fv31uebEe1S?Bx$7T@M8?Ar#+g<3U>gu|;wBA|<@ATqUPi|t9-A_e*98!tmS%hT z|1bR9{OjhL;4vsDHuhFnC}k+ryJ~9%W+7WBt$xRDmjJL~;;{XPvqgntz9p!GO;w_B z3aS!wq}y4rwSreEgo3F;xn=2oj16~A;n$0`AuoHB#9k?*(LFRPcLA}>n(LUL&YZ$V zJJVA)jXA#G@h=c10V zO7lcOd{Lf3bwj2Z`ei6~E({A3)CZD}U0_c>mo{Qt7FcB?br{fy5HQLaKBTl~iwNSX zq*M}pix8yF^}yueu&zF~gsG}te>u{>Y)&gmdT7^O@EqH9Kn1h%qh6fLu7M(Z(@m@v z``%wK$n@4Gwz9IfWX832WTkmHn3u3QFc9U0l0t~VZ&IkP< zhzG}^uU25jcTs)q8Rwxa59pJYf6s^=iJkc}FFe~B6}7}JAL06Mn-L=5zo7G9v=ThM zIvb9U#+KQ#oT*}-EiqXGYhR=f&n)3XJB7oYyP>Ibx0C-@Wf3p- z&ew#D;Fi=&IZx~&yO!nLHyWLz2FQwW1)q_VFyqz1(V`vokKIp@B4QJmj8|zVwJ+!oXprF* z?TVrmD93>L=ve%J*S|qKa0spAL;p%eL8T}lP98bM(RZTo(!^W;?ePz`$87=q^NATW z2t;gTBA@Twel}3Z&RmT_g~b&2%gwcW_i2f-Ki*>~P|El~W}nTi9Iyn@Oc%jH1f6Ba z=4@3YZ`zl07G&cQ2WIxw6tr%*&;nv*rpI2DT-WOlt*~C-_ha2B*%etvr|*ySp0K-R zw0);!%#4ze!#0jwt7Hmi7Ryy)$B4=Lp3~%b6PhK?y=Slphl73P^@1nV2txa-wHnhP zdWF}?GtV9lf@The8189YA>)3I@|0u)I(!`)P@4Dp`)y8})X3 z?DHu5@~QaL9;V<4%PCUJoqL+5pmgLyz2;>jYXig|T3Bi0$hx(p@dO{MwD<#-a|xPW zpCZ97mkSwn?VK!aOuml2yFVq)xS`m20|A))2g;n(!Sa`i?(nO?E|4jBabAwt|1c1c z1S3%$G1kGZ-X4{jOXu-Y4^w$EJUQkbOD#A=8 z7WY16NAbt-ga93G-6tiX3=p5R15-b|i8rEw{ae#}eY=a^Q9pNyIwYQ{NgJE2crW(C zu1L=NlAd+yuOuBU(ehIsi_wi1SYK@gFCbW~dOAb!WN{8QMzl5}-hXOS{xxCmUsMm6 z|A$fHtp2G@xEK`bXIuO#y1M?M@IZID+n;r?|Eqt*$9Iz4 zt0T8oXKK;Ani{Xf#lkuP^>>0XzJUQ#@$KklzWcrE!1yG}{F;x1&q?NQ7~14yndjH@uL#S8=vN3VQRMLk8hZD0*C=&iz}DczETFsIe0it(#A5v)D6r%j?cp^ z1;xO-Wo8(U{!+e!zLilQkEc<(S+A37u(;S6%Aec%5ca6&Z$_y?%WI*d=*CeHEHG6= zj|{=_u1;(Wh?LiVG2$^@U^DSL?46LV%)sym<*k~{=T@A}#S%Bu-PP~B!!kluH+4B7 z?CBf2>%q_a$n$4Pj_Kkg2`|T*>O?Za|7km3WfSRs`CpW8&-JQ+i^{y`g*^DRY7sm`wofol^Sy6$2XemnE((?Vwb4InG+^*19 zNm%nSCOTg9E*S?VmV|M~H750%C}p{Yc|EcgrJ14oeQVZwx1 zVT~JuML`eo>bj^rI68(QmubKHU{W|?!M~=1^xkJNkSANJpL?%JTkP6|sUrHwxZV0+ z^SFFc!oTs+qC#K0RkWmGwzA)fmzKDMJQ8)sA(s z7rs6Hdv02|h!CL-zh2sJLC<8~%mrG|T%_XZx~ml^j5`HtQVe(bP$8=Df5w%$bSbKb z)dWF}tLcF0f)8afl%U4_PGN5_W4^rHjcg5rT~}kl|G!H(C4RLBWP;- zdf`S|u#wt3O`nEcn$M80uN351= z5=?EQv9BLTVufgJPwC(y9z8#zo?sJL?wSwo4fb)MOe|F)3+^?7f>;X0gVWJ_JUB(c zkX+TSFJ-Uqwn-z8cj2{RJ#1K*{dptRIAhs4hl}Qih!xrHknOzR?Vsw$_)2PS(H(pc z1M3T;_VmWji)#~$}l z){i{(@`YJ%^~nxEwEP)yN#`31iZKg)*LYspnd^TAQ?Cv*;OzvAx5!u#58_C?G1=VT z6HtZ|n!?7ELB&mQwRaiOYj>-$mwV8%moIf zH=VqhP@cTA!ou)vR(Ff&GmNL8oD`X{z)GYnF5HQt$ouhVp!3?J%Vm46NdL30WQ_Rk zr~N;#H_ES-)Js3Yp_IgVz2=F_d->!UJdy2>?wfJL1rO^tus8x1sQ}CbR`?^_8o37i zwJYpU#&>d!5XzQs^aA~9g%o6&E1?+bjWa)zM3XC%Y{g(^!a+uN(SZ?)sarkl37HJ8 zk+-(eP>u4MMrTVg{G{)$n0za>&@ZY#nQR%+b2ot=j>z)J9dWhF^!)SaCB@r&VLk2X)%tUdQX_KQ~nfx$8B$UHfrWGH>kV)tCBl-+LB zTzC#Ts=uFeG)#P;S|yPYFI@oJ5cKMK#QDFwxj-oYA29_3s3uEMr|AEO6^#O2Z?ytZ zKY^+$2(db}YmXNy`?4z^)J&`e#2nYG6?4wW{l9|)sY#^=VVTJX^<2rX1{4d9^}%2{ zj*n4<^L>yAfv;tn{>Bwf%BMu1*@s6sve&fR(S<{GozS5C!4jrx|7a~%fsN{WTeGQ$Inyi5vdTeFkOJuLp17b=&{Vqth zuoo{<#sW6j1wV=oOI1K-Ta!@speHtgc3&NOtf1v_)ne+Gp0xnXEP>X4mC$+ZmnH5oy z3~C4qw7764tsDlU`nAsfX{M^?i|~wS-Nl%Hy6lp&wXnDXtyzX7;58&2$bpZPR}gEE zLZzDC^EsdAEPChTKHgEhsQ)m7JwqS(r+E;I~{NS~o7Ku@_||=d3%AIrmF% zMcOY6i1<$esCJjL9awu$Jd)H_w|=&i1r#Oz)Wu$9fw(pDmn%0Bv^eN2c7)6^+MpwO z_6E&w8JbF~oq0*kD&uWr!j~6o4_0S`rP$PxF#}_3_q|N85M_S27S1A&-9I8yx&Ss3 zZ{mBG$JstPs9T%I}6wjT1#pFyGPoCviItMxZ?j^v9Hf% z*Ot&qLHA$cD;#2v^PDeWio?5Fw$9v0lk%o>fYcVMKYkd%qNspFa+NUD8KVjrY-N^Xhm@*cA zBOvK_+q8DRiQ-O4sqBFwTLcFJl4A157~xB0YGK`BuWQ|EEiFQ?bP8S;COof^tP+uJ&=Q`EhOm0&MXpwardJSTxURQ3XGkBFXTH*f( z|6a>XFn?>+T79x>I^zBHi_C2niErz7w&eWnkN?!MLwN9|ycL7n7#ktGvUsH|(FFV- z{%NChSlk<2*h~DNC;VGSpjO>r6WW5h>=GI93J&@XWg4LqRfC(V`mtJo>Q|^4+Fpr{ zh@gnE_+MsPP#*5PykKuQPqX$@XdF!Eq>>yK4l`Oj#P6bG_i*J9?p z2=6!2SdECKX2A1Eshq`UNb*BusQ;uf))hld@-VDdkfKhOzY7@m$&0@jw;1@8cW^Ep zoFlnFdt6^3*49`~8tZo?9%ond{*DJIUWZIa6P?@>$Nlx$03ezC;Mh~y&u5_`eBvSq zeTK~%u-3xs{o}!-oxEyncS&_BvT1ZTcPx(TxuwRoH3yP=Z|& zBhgL=5a;{PoqY$t-b(`vLZR;bZ;+C6Aom#h8r%@=Z?ZaG1Dr!}hq_eot z#Q{zKI2m08KI}p2zNV70Dn9J2TP|xFkc@lHAK03s)iR}~>nuzZ$u z?I2!?ozo+gr3^gCf>8mt>~#67((R}<#GK319*UZWOauI)ZPD>V@gE#7`h-Zh$D^-a zC@8I)#6N&m&3uX1sao?W&2!h1|MVy~(xM1hd~UUf#BcPSiU}1E@o^g+OJW{~ zcW?)=vrB(8;)XVKh@-(bkc<~)p zQhO?b{}OGhwVEV6_?n4h$pclI6m^|SPN)5Q*!Ti|)!7MKZ+s(R3!U#~6s|Gq|5dKy z+cbAk_^!3CMmSicWBTXQ9VT-GQbbTxMfq|p&PpXbT12C4hnVuiD64RvulwsKj(I@k zGpA%U8M)>H%73a^igaAItTF=Mw)?8T0c%@q>YR6WH`Z6G!cr|vZ~yefoNg&Dz0fw) zzmDvQ5KD`OR^X``v<;hWHVbawp4zR44O06r$q;l)4I2C``Y-sO2vnyEkP)A?k&Iu9 zj36rgrj8S_G>_uD|HcO>t2qwXv9gyw5#XunE&SVxcM8BaHNoL&Nhfn!L?ZC&(X_M` zF;tSXvZnp%f|U`&2sXGwwN80r;I5d zs*z*7_;$P{KdvYAsD`kvI~yE88|w_Mf_nS7?EojZ_hHXGeWX1d+iQGDm040r)&4GG z@_&%Y2chHgYpnBiTenrdz%_`$ixDfcM*+Q&nr#6*XKJ;40NXU@R&~wy!h7q;Sb}A{ zpfL3jyaC|``2G>i<){SJdOtY^jLje7rEC>HZl7SIikOTsVuiH?jWvxV78>CX8C&7W zE4R%X^MV4FB$)H%nshSMv6l<(6xRn z9mj>EbMM?%J@F>~t~Cs^rV^>`@W*R{Jl2*IJY!+%2tw0(mXy!u#yQo))qJM%Gw~A10D&???MuJk;{&n{+_*&Y81E&eo=+#YV zigF%Qa0db~6=8gBql*-t?oE$8W`+Jz5Uu;b1kUf>V5Bi?ktnE=I17Z5c){98(C7-- zCc{GIPFx+gR95<)Le${!g6q4QoMNHdWQHaBcCZrm`+nVBT9komDiri|hzi*C`kExT z-$jCXzB*IAV$1!D?Db;7j~=pteO)E%?$7r2qT=3^us6LV-`?R!#foCu5%zv0;}{P= zpBRz~iNyQLGw`9%(G<3J)|0iBf+oLyNQ1lP@DEu6#8?D1Y0 z0zu#54;+CAc>vvHt>5YOmuthW*Rb&~gnVGKT8ZaZFdBN*?ErQR$jbH1mAD&EbtF8Gn;cvb}di!iGe z{&??Rm|Z|sN1=aWiqZ(b50_+?JM*gX3^8>Tv?;*k%nVl$5?m%FhXy&&+%0y%R{ii= z{fkOahv%Wq=unmhpgLc!?LCi6K~Z6Sy|r~uba^R>uxvS;{!_y7hEnAu%{SNCl7u%8 zQ)1_Bg zHA?m0StF)0>%mBJ9s}Kqrdo71HwPd5BqMGeWz3$E*2Jx!GU!bQBdz@lFuct1B@Qmh ztxU4yK~ZHVG#OQP4Un_Hw*3oHRjYJq>HiN`e-#y1*K`fT&^QE1Xj~KAEx3mOK|^qN zcXubayG!ukjXMMn4#C~s-T5~6^}PSVKlT|gMl*WXs+u+DtXfyRk^vixaYbID8w!&6 z?8cyrOO(kK?^gJ1*DdST*U>}Tq1gV62w?rbUF(s1_J8=n9ZuWzq0;rT4o6YY$8pBy zto#{$Of{SAjT{`EV;M)afU{Qylm>^J@%<si+{I%~@2oo$VK8iy^n)u6WG(ygrj1y8q#OQ#WD2 z;D&~VJm6OA$jHd|6Bid3ReIg<@9;o#Fi^Nv*|;UgM*ix_c6R$+9ZxW& zYJT?zGrx92kKeVd5_U$@Eyj`RUv_mIgfiv1ec2WlX~GJY{3YoCW_(pugWgS&Dg7UH zTwO`fmOhg?u8-_XS@@r3>!tRa9Mo@IPc$J%1WOnFt0ljVJ=PlR9#T72vh)tB|0|Og z;Ksw9A&BW8z65llAPXD!n04~urYuU{&i_r_#3#z`&H^0AXX=@3z`xW9_y{)7s$YVg znDe5Jl~l@tz6;zNKArl!N|lrlInP1A-t`MIdp%s`4;on*hZ1p^j1tz?*6MEbgwB~2 zWwK6AtJN6mt+cwu)^9fsXYun^SKtgQ0LrSMknnlUm_X_1@k!XS%c>XGZiYj%&CFqd zoL_S@kLS~Q|De*Ib<1%fCH6a5yz2D@9t_9Nz}=@j#2;-CW;A_>`RH)S!1)wLL2=Xj zU-tj>=N=U+tW%c`%Q4wEsOdaoe{MY|#NTt~nOpxmCsbC!2R^ogD)IJ)dmP94^tmA& z5LJ#~=9yTS%jX#eT+~_GseMH#adc;3IccSk9Df&OCP6kvxj|vPT3>Ct`_>`I%*S+r z!Yt7AEG!Uu9>iZdX)Rclqqn*BqUyR@7uia9=UyI_@i{54{xfKfux->X*m{;`P%Y6; z`VFChxC)XO(ij$-Y#T9&Xmv;}&wOJHjviJY`OqLZq~qIbxK!LfIV!yCW&&Qlmm_Qr zzL@TlQc`tBvX`8Nz~vyU=|78)QFkG=aQ)Dx$5=vd5ydwO^lJWRyf|z?m9mO&ySaLq zezx$WY-yca$AUmS^La`Al;^5lJV1XfkJ~1Aq3O!6OlfWhO z%jLl(E{px=isp}m7p!pIvr6Vb2~hW|rK@t^mS1wcwoz;m6nOmNeM*vcBk(QYe}noa zUwxK~l}U?$-i)gVJ^Q3pIY;G9t8u^uhkNXIqw&5QN@hA`mE|?r8A)M*c|58?mz0u% zSkALvrwRH9n4H*n%(+kf5MQ5G(K(jqP*Hh#+7;YX>3B#yXy@Depqr7MtsVZ}O!MJm zMNxzGa)~Bv$K@z*%;vB7`!^K=ftp%iC&l1w%;%*h*!Jx222YUf3a$YVH!%j(9*@p?^Z`FzI!yST_o8{C#)?em?Fpx1*CQA7BL z*khbjq?G4AuvSF)7zMq1HHJI+Uv4U4_r+;eP&JvXY|GDQ_})zZ#K#2;E%tCHE4Lqr zZ2gV4XFZ?@MmS_sD~kd;b35@7H4*cTm=Uo#I_w&Hs#kIv$_0df3CqZ>C7w^+uNDU| zt{a#n?V8geVob}GHP%IfusU6K?(t!mGyhLp2W4-Y(5Yk#3$&LI5w_w{xx;ye8~&%3hevrj$=Ev6N4`*8~f zW<6-I2fIX?e)~$Y)9tuO3N(eVat%;+$l`crvsV2<@FXuBMa0=<4W;ee^u(|tj0Luz zNVVAsuHOO%z+XX)zF8Ta-4E3J(rOW!i`DPII!}ir`W7PmQ9L&Qbn4nKj8Ibfk4vi- z^=q+thG|cIg>znTLVr5wvS6Jo2lj8lR`6r5AoipsB*jKagg0s`yXz;JoFOfP|Uv!mG-6jDs&bbjqzkef%il(}y&^5$uaByMfxRVkNeeK5p28n%h8tdzo)q)>9 zJ@E%?`-wf)YR2}e<0XinDpVn$&HiYW!-ugorM0`w=>8$5jxt!>Z>zh&TqnY5QP)$f z$%X6Z_S^3OY{d9zP<{_1RV@av+_>%+V|bOWNl_{pzRtqxKN}dI2t8+=m#D-K>$i}Cp zzi>``OvROqqm5CB{n3sfQXQvxG1+x{IO6^E98m7qvLp;;A>zxVWMi-NdkN&43A+Nv z!4$N`@^TjA8IfP$s1}QPFX|R^e*X+z>)|JJZB@L2pU+-HG>z(Ad!;W-xQyyIBHXnN z#SlkSi!Y#8`Nh+Pic@SnjcdLqxckWJ+q_D6WF6FjOVY!6&tqNj{y<*qNLkz|SR`P> za2j=Y79d}&bw-|XUtMfe_x6sD^*retSBOzKgJW&5T|U@j6GF)WW^i$eB1oTJ-$YmXm$gZ5_GMB|3945aAmi#kszx3XY z{`7bPyZ6Wt^IexY=~^SP^jSV$z!iJ#|EOh1lbT)MDCP_Z?Pv7aki~VH7{P-ea$@ALhA*?ezyRa0y)bKWmM%1G%NR;nh6j8;tQ#+=O+1eYs7=7`=LI!&7lDtX zd!2))A5E3$2}B5mNHG%Pyg1(Lx5os?WmLKG=sj?YE9$cvV7B00j!>4mb8J zVdT)AY}YCC-DNx7mGq5xyCa1YN$OD8vZ69q)>F@5_bv;v`ZR5t4`<)f7!NO@bTqD& zoU~jw{|EZq;MDi6cG9H0qYBGWxo+}FHV5+lo$iZv5qo~#Wr)D%g`Mo?vBC9Kgx*rwGsKO1uRQ1)F+yG6)l# zJ`&XAuv=pFcWU?Aecf@AUJnZB!x1`#rR@=6@W23|U%A_g|DNzkpN1(5L@AR7wqp}? zI#3+=iJl4H@b*qmHnS@xOb)cg`*5z@t6rj{kO+2J@=fQuzclv|l@gT>MQ(VZ8MSVZ zbSklPC?^Sb$ZSY>C^LvH3%*7rRSJ9=5FbogHymR6dfpyva2L&F_#GCykTVr2?62~E z&pZO%Sg_EzjIUYeR+z&WRO=Ow!soJRZt^bl=?n%s@%+XS6Zm))TgZ=X7l>o zg5^+jWskG9mTA*t+wSiGG5vLR+Rr!JF67w}4wVb>9OfghPS5C|7Ka5E(}FX}tV9gV zQ|6aaV@QH$ElHhh#9F|a^n@?MB2 z>L4X|=DNGzaW?A9G~6Dpc$+_&$i^q>pc{MKw=@42?bA1x`ACG{ror?-tGOR^Ugy8) zXSmb$Yo>^7E;t+=H*w`?x{q?LjuW0sfT(Crddq*KE{OmzRg&A&i574VZ#>A`wmZO( z@qb4%PjDmCVU*i-%@5&|=o`-Cd%pS?d#y-sIbRK@<8__r9xAn+>AZmSujsqDo10tL zh{Vzgg;Y}Kv2*E#vH&5gyq6dMTO$|ZairHBq<6dQqH^6IeoA+eS)da~?0u)F>0vde zlw5`)%|uV1(EAOa+um@=^-nsdODEpCRpQ zw3GmjO~*4wT6#K*+l3*a+g2_x{{$L|RQr`vUJMp(IYyY>3nh2{8_-aO`xTk@ z{ih`xoqCLN9iOlqsv1Z^q#h4lDf3mn19*$GVRP#Lw+o=Y;Bxu1oV2_dmh0f-ZcOId zEWU5zNur3bpkEaV+RKOoyqu)UXt!%95qlXFW>5=@aWN5@q^~~RP_bXQ)OJ zUTlfo?%{SHWj5Ta#!7_vF6{V>Y`o6j0hMo>>U1bOCueiiZ8BEe1_^(d%fkX%3m1_z z%w7mq1_Wp>HGe1H(sIz01&fv=g~DV&eUvTc2XFXDiQHZC%Ix{ZW2!dtlRw6N#|K-; z5kCK(ueB?$u8y}-S#~86Q#9Hkrf>f4{-4h}{|i8sv<7tu+B1cPN!bmxeJ{*K`gbsu zGxwRsNmSj>@6F8wv&nVv!xXNW?!S(CZ>6$*2CffjDi*g_?89Bd5aUk2CT5D;A2^(~ z)z=qJ*tj0a$>i1N6Bk+?9dtd|lh*4QjgmB~ELTubrZawiB=ASKcZ;hpg&UOO4IfGO ztA`jB7@ZBKy*5IrD7Seu4w_Gz4|CJyY!yf^MbP>4*X)xsf6##{PJw=$@x&+3SsdG@ zaUFizWS-Qw9Kv!FUbki;5<(KP}cRVA3VG-Atw71OD5kn#U8yLlVz3 z=$l0w!8$9Tu|sA?CIrh}&lyBJ{iU0!BJsNVk!FY%u)LRs>2FAl_(p z{ny=K82D%=LR892qReR!Z+`nPh&@`Q$g5saJ1Mfag2`jSdke444k-@sH!tl1Es zI%x$Z0gJX|pmY_|m8uvq|-D9%utR#eK=Lw!=HSvOV z;$x78trC$7fdl}e)%V=htStrtZ*e#`6Psj;D~f_m{VgDMSjC6v?tiP`_%VO8RyHO_ zIlWdK7?G|2`X-*Mu~grV`ic*weq$vi%(IUDXioRbVL(jR+K!rTo4*gb$h(wKVX!eB zV|H{*=++xi6FYf^7O5$TTzP0hLYW&LVvF6(un@+3BwB~IbTMdbX+1=hf4UZa>_UgH zC1W%6-`SYIdjKxl*j=m$ifo@dbzkchp!zD7JU#Ru&yWnhGybEwI{)x&-+;YYI@SyBl4V6 z1i?FK()6!>u;-smH?WG1^8v7QJxF(RO{=@BUyTn`+0F*)Ip6`o-dza^ZicxIdGO{=B75E(S7a$ z{Ee%-h3Sx>%^-2}>W(%dP_0loU+G5*vN@L>mk`^F<_1!V$80Z<~)zrECEB())* z1lE6Io&kkx$b3@5H&+@tR8}8}F53ViDa#>~$|~x2x@SMJI|}?qB5+hjlfq@E!PzfY zMdtS6j&3!Am-4p^4ND+yczwHcOZS7{MYe-9MPI|70n$x5hO7ElG#q3Ao9>{R)oqeU zR^N$%R4vLRcmFR{cF*sktOc^i(#rM{(kf}t9^EmdpV$>>wwS%2?%J!w!eALLQ-uD`jUMLn|?^p{$9giZ)h0Cn&1T3v=P<~XsyRG(czawyP@27Br- zXzJ5J%og?-9ej?s$P7fa;dK~d@2l)-k`VSQhS}aJTE)eFo)Wiho;K+ zxylQer3@BC7balH=v_#~f;ID_V>-8tEg`oHz+1+3*2ZK|CfOv^sQPESa(bi^swK|K zSf>iY{BnEtZp0oLEbbybJ?*pw3V9x+at>jvwo)8HM%QxnQ84(!|Ed{{DkFN}PCqlc zEGag-bXfgneiZlNTJbu}c71@ncAu3@G(PlMb*SWB{@=7WFaiLq^XPjUUPav=Jbd*B zc3@UJtKx%Aj(<%M{X;es11M0oycTOEI>0#ztOYj{Bl;V4IY?L8Ljv$%-&i19El(Fi zWQWG|Ax@R5SD_w?WJs+aN)_xLX>BFxH+y7$t0v-0_y(!+i?vX2-2CB3Anrx4{Y<&9^|jI_HP!JRhjs zU1q-$s^Q$WPl`h}z!qzXbSktM4=Wz0q8A~&ns^2_jB0s$D!Dmx?-)K2ZTUBDVWV7{ z`nn8!45lvaT6SKPQ6n487SvI*6chR6r{5-gc9L_KA1^DPls{Pk<#_Ko&{9joXj|~2 zSmcKoX5`=3(0*MW;+UJK>P4_WqhX^oQ`pzsDUf2>M71~C=gELQRCE*-dU=DLbirJe5+kI= zU4r$`HtjxbQwx_ZP&B&yml@?Is%w=EkoG>RR)*d_B*O-5N}P zR>3c==$|96UFC~>9p@3HSe_Oa|jO@er>RG4>7yqpDR{!_GJ}D8__C%Os z&Z3pbIvGY-eSrrSSZ=P#L93RV)I<$6)zvxB)1etFp@5wPrw&r~Y-0)W@lp=3;RqVv zqS57jlIe8qjtfaJQ#zvJLaQu|qL|NJjSs9#!8e_ zVZGq%R)Fi_b#m2Zg~vuRj&|m6>pkcHjXVjdgSlRhHrKandUP~xv4C!qj=}1t=sjT0 z%1dL+!@}(ZdPP28^a|pIoVVduKy{5h62_n#oz?N-mmjuWC!DX!1nz-=-TM}yEdkPi zb464<&gz*wJ?lnSvEzvJI$v5B3(zO)m||iygzH39WuMe}xxj%7Lj6X@EJ`Zsa%ZuH z6GQKWE1|!SjfI6*%CZU<7PhDf(FSu?PFQ>8p?u3iVQOWrrb%MYgRtQ{Qy_;$RvVjr z#4hgJg7#C^hht#uX`zC)25r6LoB%!a%5y1uPMONlKYxx2_f+oDpRmxk7{ESsG z-g4_h>aPzSa50}uE~lh#B~Hb?3;PgC@J&LWhV}s%RFkGq2JQg!268Jc_0qFu*o`tN zw(ti6HF^mH3Nr=GegcUl{%ind^qFsIEwvFa=9ZR}fai{mTO$Nqwj3;^6=VMrZ4he@ zO2y%>$&1(C`U62vAzRBb|D}~=fSa@;B`-ch1QZpCgwwF>tdV?*OjhYxIe1S|rAGM} zQUDQJI+57O8_Gw6?u^^QeM3L5y^{STKAt_2nq?z!+P8^YEk8yz^2X#(dU13Vy$-dN zmwZshIY)05rKJy63d?|4#xEst*HHDdgd}Mw-)Yi#(JnSh5Mk~*=3>jQmck* zcT-@i=y)9#YfC|CD9uT&tf;8}FxlWBNALGLApLd@Q)=-n{4}a7-<~c5O?`2aCrK~7 zrO-M@^>~XR*c9)>kd`#!<8z`NUbsHKJ3X<-mu8&!yfbNb?u>?~pi(S03|+&TSIc4b8&`W)(ztCxr8 zUAAbootxw3i{0k0D6DKQk4p`w-6?E!1eDz}=Pw$I7AFowdZO@pwuLdw{ z*!6-3eg5e%C84i+C^#=)LUCUe>o^rVI8bfUuf07DoxQtHV(sYyGLbG+C3dGmuj_M9 z&O&yU&0Xw0YNW*2wnBD%4}a>q)JV-yz5-XH1V&kog{wAk;6f!{0zf^xJT2kgN;g;< zo&PF^&~aRt<`RY)NS~t%Fp<)(FVObyhtzz8dMjYIfPxWXqf!u_^L4B)w?6o4!N^DLSPrf^!3cdz)M+6Q zKFp7+pgw+5Npa`9xcAsiHT4p!D>IS%eb?CfC$+}*KmgCHGyY6Yf;nS;|RlO%$jam{%v4Il9fm=~MY7eur#xOcY z>A|NU7G`UUjlsx;e_bA&O~tcU>f8{2EYrnYM<~TSDTs*_6P)iUf3Et?T#5R_46ThB z7Fm-T%O@p8<%TSW+#hBan?1DwQ!^Y;rVd%QiVaz0FaNgXE2G9rZDNrWk_M$Pv4^(Q zK7}hrY+!H8FaK_hC&%YTpQcyJavhovg9)ptQ|%wM*&(`p{2SYqjcEc6JbqD@e)>^v zayny~mjR9^lS(uApXtj}iR3b7_)cyUd`8XX0HA48sop+qZigLBRqrYt;Mrt9u!61U%+*w*BP7e z+(+d>;I#s{C?4YmiSBePNt+H^5)7XO#M^9Wf0?wlUIOAuSHo!jn{T-R^_LKvRdL^7 z*I83_!>}QgGgdKkw`l#)Bj?5S23PQjL;dVFw{YQHNsL-0n~d<& zuM^@VW|jI5@1l1Vk6o0Odh_mGIpcot+9Mm?WFg6P1Y<;JK z`g>T>Qjr`O1KkrjRs;s3cI=YIY1wW+8jgvd(t+eT`SWm=nqz#@tfMkTdl6#RJ8g3ag7J*Y64`ykM^N z({H_mBKe1;3l9;9%!gsO^bLQirN~S!n80(E%8=X0#rKos2+5s?Z|MT{{#fw8s}GWN zzuKE%wO-=-x0Rr=XD?%^e}pR6YnaL4ox>GhRISCK^SFZIji5FFhk6IFBgG?d4p1l* z{;*8mC!BmSv4EnkmF0CapHs$j+zEpY_Wcekgs01$IF~T#dTx*YPzg7IWzjYv-b_oPv78EHhASZK@U2T(^!`FtLJ zkZtdHmQ>@|_u5V<9xwVNc`T+OHNtZ!rIrY4%OF?FG5Y&SjpttEclv_luWZLEU%|mr zUWtZsF^LWX+2k-rI-FH-6_U1+C_?Y>JN>Z=%ui29whL7E9r)CP4Y&5;BnPx$3P%rvMz?Bm9UN^FcvbvC&1Qsl-nhG> z`en^rEzcr+igvA0)30YRRJ}p)9+e}fSCS#?FYQ)U9P7QYb?VadhGlA7&KEa5I!UYP z*6lFmmWm2Zm^G(CVKCo!F=e^_ls*77 zJO-uflYW4fp5F$wl^+hZcEC4Ud^4YPH4LF)MKVntO5!q!OPF3cS(tGwg;ty=G$=4I zf9&)6l2x>;q@qRPI#?L@e6#pH%XI_EDqGVE-g-He-Ga(oo04SYuGeaS+7XEOva-KZW&prl15o!9~ zYKYyPuO1U%!tb~yrb{re)oXK6xWHPN&hi;c1jeWaF3c*;aunf=lc>DGjD&cS()xnQ z`y7R6bAfQEzaDpN#xp})d;%~1MJ_AIp>LV-GMj)(CVU5Hy0w((hHk&^(De7-LV59V zh`}~$O?zTf^s5GT>z(S4bd|O~(tU?DmPv=;w(tcs+Gserchl7u8&=Av@7hXvhuljU z;sc!b%3JL$^5y+{Cqp=`OHJ&!h_?;7l60Q_*e>T_EE*x3W_Hwy8Z(+ERr<*Jo;O6A zcYN7lRL<(Vj?B&YIh_r1nbx6WcPbTUNf`I-H40;FQU5sWith4brMs7@IjD8752tcYuNy zC7~EDH{~wki+KIcqG?`at7~V1*Y&iDQ81D7fU=CPH}8pQk!+^n{l&K5%1`0IW7jTM zQc#)Dytx#k5X)3c&xB}{z=J7~&Gd}fO&(O(FV)^GHj~k-mk1o998?yuNH7fbs3A(H z7(q@K%J|Iyht;jkxg@79U;o*+73W2(_-f#+{3VQit-|?a`&tF{Q}pBSo|`8M^xC~X zx+R^y!jc_e9FCaBu~;zOmdLW|5B$nAAgDak(}*={=?gj9;0y^IG;P|Nts`tKm$Bgs*XXKK{X`4hijWmzOks}~<9g+iY_vOLj(RNLs zz7K`a+?8#Gxo}N15Ne0OeUBO_0r9AEV;~s5_T)k@YNgNFAd4Ke5>0I1}c93 zb_dbhIx-0)FJ5i*k52}H-)MY`zis$#Oa(>5$Y~a4i7#_33M&g9xX4daE&2CPnM}Qq z*+GX5Va1em-AcNJgS4mdgRc=j53cjxCZZy!NCtw!G;;ivSK5qzflkzQe=kf!l}~0v z-6@%}kkYY4gx;67QyPx4ZkzFS{4gH2N>W2f1GhQEU#wXr^$uZ9<{Vha53oRjf93SG zG%7>aJ*ynjKR+sd>(IukE?m$Nf$sIf5?Yd|Q}`ilKCaA(rK<|v*JEI;DOVIjudJYf zUfZXxt(Y96(qSGKyb27u`4n~P76`>^TjZz?>ROr2tO{?_GW??5NXY3!(^r?MU|s@J zTX4<4lDMI_MChutZ|9OV3M#FK3*+A5&T*Y?zge2+MX4PE8K2n;*|iMsA@id2jDmhh zdy4%c)97A%kCoV|D{0u!1b;SSTD;)5nsnjo+mokrhPo={8w(q-yR&ue*7aC2@dK_u9GK83ydfTzP~ROlAWxeluPMtM zUykhg21lO#8w4Fkw3;2yFTTu5(=P&E$nQmv5a^hN({gKM0JQF2rUlG41NAWSUbP&w$+~oKv zi505eeh02c%n3#DpLyw0G`p>oq5iQ%*#eYI9Rp8c74Wp0qe7yU2n6m*WKZ0zvOIZ2 zCNyU`>2frC>6NeM=AfDnS+P{q%#@I^R-Kwa)dkm?rPmQh`1Xq~LUz{9;Ux*Tmb_Bj zhUGe*!SrmWd%hdR?sNNg0JIAMio;tFagC}3S)|tp4Kw_&koLFcX(6qEVfAo*X@!+n?YS&9f)hhAH%ft{8N9;^gt+87WJD1 zagWy+$odQ@d6!Yb+#NttvOu=tv;RfKNqlc8xtln2uIansseM_yYHXqV9xmJrYAGoNQU3=;j3Iix*ZnT(&Ok4x$TTER_dltZJfcf3kAsLq;pECjlT9Mvg_~@qRuA~UM$Of=9Cb*T z{DwDDsvU%C#y=W~_o_u;R>ED!R`zj@6PTj>#hy)r?Oh6ff2is=%rLTDl0|D~SsZVM z^1*&p>T)Y`EZxY>A(9+F0RaNaN1;KZ#!h`1QK|Nvm)~D1C@o_*RsA@^v{EwyY}%=7 zDIHep1Rf|9+_D1Pa05cQqYB}-;+64V80=bpQ5jbwO~O7CK$Ch$4L|ksUApLD!>mm_2Rk;@L6uu_pQq{rNxy$ zFHbhD=Rxo?s7lqZjn4%9JuGj=>fx&zc09k+|WRExE2)X*h2fa5{x>;XMa zBA#;p+~#Dd>47bP>a<{9yPns9sZWVTqjP`0N3D*)OfM+v+r;HCd70E?LY zrA~8IdMjVix1qzJ^%m*8A54W%`3AZWvs_%)KBn8WH;HpvBu8S(@%g-X0>CudM4VcU z#BJ+6`;S6RG*CX+S3eE)TZgM19g#@_7=lKP)=2$Ha>2G6m48feFEtpA2w$B|aWu#~ zEv2ug-zaw$+w_-B(>8V{uygPM5av5jv^${Me7m9I$5pZcCr$MT$KqTs%%Nxkg#Z zh+%s&r?s#{L!0sSwg>&FTYf`etlm` zXWpfD0Ad0l^W*>eZ8tDZZm<^7D2Qn4zxRr8tm~p)TK0cPl98xtH_KxXFpD2zEdz4| zt*U9-`e?kq626jRXE9`qwv>NLZN_tiiGBS|SnYhC<-g_NG7W0Cx!#4G3h)0 z6--Qv&UfaU9PRBdu$1&sB*+vh_4CPeyVv7EH!!TQFoer=<+##a&j>7v(eFHp_hZ|LTrP2OrMkbMp2S@B=UMXILfAZ7i?RN)5Wi zWt=D+XX4wDZA}rfbLxPe-1W_t7|T>?UoEgUTaW5Npx(()deXY4KUB`^qOqJ)ljW`o zb}U!63((7wEerWuiXF54eW9zXJf^B+bjfdHCXTMQLQZ2fCgv1Jd9d4tF(#RkA+cAA zuAtzzJj9AqDt?MCUC}J);3?BZW8}XVs+8g;jsRX%wtIZ*O^K#Ky@Z!QuKQ6*>!Ljh zf~)eyYbBJ^gfeKM%>Z1|e$D|G|?o2+Gu=do%y+`8$Ogq%7P){VJ=I16M1bS zJ>%pR<*`*lo1!>c!7)z^~iQ znII8AFk66UXd-e*RxXLph*%nEV(8^ zRCfTKU1~8&pOAj0rVe*k9oea`PVk`6tFSf9CYAzN*5Dsan^5ZehV=UwvE?E-?Z$|1 zW7`q3NfX=DalRH<8|g*&KvOH9Rk@?f=@$Eo5@5@@23Y_hVo5oB5FL!xND73SBb`X?B|5uKEW0dP-0B{?Q zW^nT$Rcf|61dwM7v1HkK96^Fr7<6Yo2Ioh8cj+f`8L;ao+TEtwIT(l~Yg@fR>qp~S zgJL-s<;GUYalP33%c|dlh@P;)qo*fazmj=#UvuiK-yyp2N&xkKZ1J??Rhw=RfdAJU zk(QrDcu_H0b^n00-r*Zxt)GX%rCY z#_4Wq=im4bit|1jIayhi&xC4IC)LDi`fYe>Uef14{}d0QlD5>*Pe#XO?&K9b!cNL{ zI9vO30gOtSdNF@_^H;jBzM^klB2i`Jn6-{T9EDgmxeNrp0Cy*IpukAGy(YCc1K&{V z7NDEUDE#z5Vwt1asbPmNB59%QsS`}8PrW?~U_Cv5jW2W)1W#GNW%rW}s9qy^Hp9lN z=f7hGi`d8Ly>Y-@F}xp0avIfz^RAJxtzy~T6NdaWREFN?Jrvhs%kVO2m#sx8jR}mzVm&`}ce|)-x_GIj#ENf}E>TKJr7SKC zr~fYq0 z(BeI=jVJWJTVDuI*mSLi{>~~#GL%F*Oa{cYiS1;a z3jTwVQY_9)ifF-?#RbC&TS{j_6xOF{`K|5)#eLWYwkL=U>?*_M!!3rD)7FiI~Fyl=)X(C~e z>abTYcULErlMgWbvS28RQ#*!OZ_I)g&Sj~WA#k@AupNk_T@_D3BzI)ObLK&JnV~ye z)6b5LFwqap-OOIw9szz7@E|;rF5wX-eTAMdAgS?#6Bs6I^#wG@_+!y>=|`U@xNPc& z-Oh~F&eGI9C*--d`k{M&Go9!|^=`9g7K{l5OPoGO#a+mjqZG(DRGo=$K!Z1pN};BH z=BhC=#rw6DZjEPJ7+rB_Vab(I7h$=U@|&~ArO%7$4$g1170(>hFr+kbmvbjvR1dN` zWtkUIe2kPQ`~GUv&iAwcO694M#Q!iY|3iz(XvFD&dG&KMu*zFJYYe&<$T&>2Z+)Bo zBCYi*0N8W#)N>U3_z=MTYsf!2B9Ri1Fey!=iS#qqk-EddrCSZF-X8N_x61`4V_9kq zHU^3)SizW!maG1b1d)KX@@C$5T4u%Z7`$c`ZtV}z|s@)MEvr*@gRHeY_)N||ZM}RH zqXE}@$2{D2ytZjNO)EJ=@}GeMs+uPefORvFMa%x}H}v!OXu;xpOcNAy!?^$}P%{(`ToLNcH|#kwj` z!#x8&Ma$zmetJqdOjsmE?&62n$2Sf}dEjvxco*fBZI%{m`1P9~5GIfovx@YtVZ5td zNkdBTW8Ob_@G86GwkRTw0=qPeMk4AYO_BGO=vgjbJtA*qI#Z8WJomlAH8ND%|hcb zZvO}8*8!}ZlZv=V-3|)CQp2*Fk=brcb z-+j^)qdX&sd>#}m5dHE4)LuF*P@+XNQmEmDIH$62AeJ{ZO*qi!ISfW0UV1RUPoao) z&A`o}R-t}-FE-#Y%DrpKG^B|G{qr}~s@s2bvd0)EGn6h4dSeyHb=a$$KjvWGWr(3+ z0GKhwZZsXxK_|9&bHb!I{(bShhYxd$hT`JXjp*>^V><6-^KT$p8pbx9ln16*l_Rg+EY$n(u8s2AH?CLYQr(%wr%M6 zp9eIU<&~5QQbADH7!`0QD6M<%TP56_FUsJ~wxgY^&U#+?s7@GG@>^9FKt#N!$;R)I zXWxZ4jvHOQnd~TKGv;=@VW;4K7G&1fB2bHcPnhM)>Twk!ASGFIT=lxW9v#V`LXmhv zF?MCFUc(AA(>S`U`FHHKLJ*=e><_+_iAAF>w4`hsmxhsi40K!>c{;w`t?}vRfKm`? zr$7Uwjcfy&{b&y#@+r`<+xKNU2ja(uT6&w7ac+`&z&T=Gij7ZQp>Bn$CeurC0%_CV z8_*vmyT<$KL(V8&_Z(n9| zNv`lfIqTm6-twIMuTLL1_aB~|z!wcLVc{rXO!*IZT&}<>`gV(tEdo`H|6}ZgE*rnT zQ8Q@9e)4!gJE8d##b&)L(0%(EF!?4q9ZUCV+W)2aqS+@6!dgvXF#y%vn>~Qo3M33! zBP4kE9G~p}$#9yj^<`&`*ibrz&?JSgJv;Scsnn)cNfw}dmueNMc~3e1azEx{Ud5Ph zUl23%_k6$7l`lm^a^V!bQ9F`4TD6!IetiO+SZ&c4OJ%5aFm z!PIXn%8uvz$!GY4i|?@MPd(DLX0|f!9E+MKqE{m9?^HbjY0J!JQrc-F@&EyvHF2Hm z80Ke}2n0;=Kh*Qph_`hD$^u%~^1FN<2?ho;GLsNg4A8TtiqJIhp{VYicf3AVqKdLV zH1?_GbUJWF6g3gu#_zJMSy1|wB*Cc+0A5~N025j*eCl&Zfm5z$=BO%$=^Vxu`k?R0 zDz-jv@)M3CgJkplR9glUaS}7tTHDwL8{hG>Kj%j%-Va}otuAo7u%~*tFVQ+v3|p~e z2V<-#K*t+7V)-*^ek%_rA6HZfSYE0FBPyEn~*-#39(a}3}oP+Qbp1zEc4gS3u_$pBoKE5#7byy*JoX#2f-{dv!{DR^%K=tJ%{^Pp%5E!VughD^{ z3w>B$5j&?-at+6F>=P07iiVQn4ZsY5+OB0t@@TiW!Z^Ua<3YjHLIJi(ey;D_ef{2lDd-vqkd_t@d3X z8}el}MhPEY$0ZMeL?j4ju%zTtQDncwO1t^%L3ITow#VTS&^`EfM|L zi?kh@A(u5^#9+ijZ2P5voK`)-Thw^@o7aJ0L8WHzyB$@Bnn9^&fo-H~V%QYTLODZg zotG;c-s}Z*^I)cM#wB^>3K4PX>_rL2=P~;G3tp@S+{v<^?z{4d4dk?nyyMuC2I;i9 zl7P{{&TpgmfrXaecx3{*_lMfl|Kr1fj?O{I&ad(lC3VPHA9*-~bOkYB6-!&eZ8oD6 z04{F-O=bShsWvkkiAcYt0J9a8bI;cGwKDay5$I=Vf@UBRGmJ8uoh3{k=Zb4y*4H5= z&h00w;87d`ymh{mqdqZVQl@wp934-{4d}Cxyw$HNcFZGb`gu$@X_E zd2<}Q0aA)%^=o^P*Kr%pkwkQrX_c8W;TsJ`sR{BK%pJ4Fb-^W#ccd^SZuH5GraWmuJChL^JD#SKbVZw;;`6AHX@OD!$rII}? zDq)NBpm(?{Ma(wQ4)1PZIV`C~dDMG}-<8`yQA2~&aE_2#o+P8xaBflYEcUGrEyS!( z+?KB5ue0X;WNwSgJIzL-vz0ohX9h3`7ey|Lwa@5RG-@Ol>t-ac!-UlMFbS7J7{$H` zv0K|gLU;*{F5~HPYf4%)-JPU=+N3x|$6Op`wOHb=YZu8BlK(1R_7t&;dZ88Mg1##~ z^Rjk)fpRR<*C0=(c%oFXsKclw5XP@j6%qmymoll}D8-=cw0(+NWo@pu!rtE3o0oGA z;o71uPOm~OQXlCG9=BNUziE#<+AQSM$uy?MppDQB#A)PxJXY@xk=0wFkmhSf1$yIj zxDYgZ-jWh{R1UQrVnZVkS2ik|pX_J^3wQiA_^GyrS4O{aqNjI7+)UU5@NEd-LNa8~ zO&DbQ;kH?M{Z};o@GGM|A>FQ&kP0M}OiVrRQCAKWbLsrQ(d)I}iyUzrWVl2_O)tsy z^1JxWQq|zsQ z$%Y8Kvj-2jLUX+&;B*&aM8BX*O4a-P^)teYWA%ndF~;QVg?dC?cm^9*O(K5D;b3Pd zZU;q6>L{loNcF?!Vc_6*&Qw?v6tDci6kdRH<(RY<=7%8}dii3>O_S0veR8&9bLJ=k z$dp?a03ef;tQfn-ik@itJd_>A;Rht|{sNa#lu#TU1oylyFHg~8 za;}e*t2kPiY*yPMcG?!!Kpm9aa&?5Ps&U$0a18zo$Ewl1(rNVd3T_nb4?CV4OghZ| zNRA2jUi5kSRhslFJyLm?2YEXAPuHy86(M2D(PD17 z*PeBpNv~(A$NyY-CuPn+pH|w_vI+P~y&-M;{voIUO}cDP#~+Hb*;k0L^$OUM?TAyX zpI4Vl%)0}v&nx*Fq@)HT*a2jbanR}(7^f%IsAvmdWJ+>6#;(J`!>0ZYV34@Q=INUE z|1{>x{W+@!eGF}p#BT0sOH}5~4~h|qEI%V<77>h?PdyY}xxxTMum_1uDH+yv7jO0W zG7~>R{9Gy6fKr|CS@aPeNUue8R^;qx?k6sO?aHADVz_VD&Un2IKH` zO9H?m;K~WexE8eikuWdDln$bffSG$rgiTv}zt5%znL3{riNa7YK*qZ|AY!_LK;&rn zuibuSZ}Ja!#*ncb9?;s*Bh-9p!j@-Bd4ms0w<}UEkfXL7<01xsP#9eeQU}Bky<@y) zg7UFsicMbB@km2hHG(-p@bUWd$w%!P|JG_Qm8EsI=0$N!*hmX8MOSM*zC`{v|1HG9 z=RvaABmC(HvzW54R4$U>kF?UifBEJAk*fW6gM|Z^q2I3a{qbQ$M`GuvDpsR*{M?*0 z%p6PAk+t;FlO|ZHB4MJfyXR*aZTg#ncbSyQT7>AUCbHAXw2TT%d1}{Wj;ZGh=K4hn zH+Bg;kZx4ENfEo3zY5p>Ix9Jjrj+V3ZQWA1Nb8^MNd+`LL(Ge)sfqEB z5AJrNjh@GvF#;#6uvf=u2&n4u(wEFmRca6Hnmz~2>}?x zDi=ybjC8YLg9j#Es33I~{!$3zOufc#CUNc~%NWasPxu(ZG!}2NefQXTA2y5FcjmP; z^&;gQd!4UYe|f1y*z90LhR;W{0Y*ZI7z*@R8kd~*(3h8df$q$;ncbQ6=@Cm_&|hdtn8D| z6kE0JJBIf?C&VsiMx@f5JLZNJqaw2hdF^XfFSq%J6ea~3UL!pK_umz=P+NK`Y;z1m)cq3K+BGzva$KYFPl`5tFletd~ z?rJ6(!q%p>Id9)x$Gx*4?Wr*bF^|M&>vu&cNsTp1M0t`#6SvB7yEztd?NVM8YA(gG z%>C63b;{L_>MvSv{+LClFZSovX~#M4=@sF9RhKkHz)7HsYABY7P&mEmibNj)SSI3d2^dW6SKwzeo=<^&~# ze*N2IkvO;tEmpHiS0GxZ*5pfh#&YB|T@207SH=&yd%F_k0}9;{WP)?eO+j}iEjX=R zPx*mN!SpK)+;f_V3GWJI)z>0yH6J1G#C|%gNB`E(zCEw@T?Eh1FCQ^eHh|nhGBJ)n z__B@NgO{}b$orDtd0wJUge`WtZU~AU|11a88<)HEtMAZ0Pt7h+#Z!*j|HTPDn7U(rutQGd0r1ji8M9YoCJGw&pbNr~ z;b$f~U(t02+wlPRum~$RekOrDs|So!*~DE5PpRJb^3xH}59yls z(o$nDb@+C${yuKg($@Z#aUg(nEPzM}bqNW0u|7az=Ux!)6Lh~{u&N$9_aJvW$u;@) zG&In)_sdn_Q>>nENw@fVu338Te|nPyY|Zu%+wQvwW1Et&7|*yG@cX?Ba7jqwFx}mv zUw-*{g;j>E>oZbRFP@xzwXa9r=7)OEIQrWAju_XSuFLnsqV#$v33H; zucWR?{&Ex%)A$32R{fz1S0wNwMr{r-A6Bnq`{=+m-=4SD)-zbNuENbnr#{SRMNZJxowSdf5d5I ziAx8{f>26A7%+_jM9&JEzUinA*1b{GRi~BPMRlskE>TKO-py-jk*Y0v-9u87Tv9LA zI8kWOOYL1**wFo%N$tU-Dmx^ueezd)yuIek1mjw4;90}g!283~JbK(!%jR75+(Q20 zoQA^9NZpOc%B*saX!<$y$`klMcVjBjO|HtUqMuR9SVWAeQh$DyocYsb7!d{P_n9>u zr&n7UdpymX^|OdD-3q5{iaqb~sz5Fgz|~ufLf$XTaXR?w$}s;f4V9x%^wqnsD+pDz z+O4ezV5iqJRv9Z*$e-tZc(h-fkD)$fl|^7fo^$Aurad9$a0PToEyckeT(MJ&g|b^g z;Ep@(3ZNKVSQxOsLFyIV3c#<_xq%W+>RZ5qPlK=P7=6h<9wF$Bm6GT|!Z*SL)9vKs z$Of`lTS0`PplY53FZtu<6QF1!h5pMWj4Q1x{FywZi>~5px=a$T zQ?MVC5e%HkmMl@UA!`v#Ee>mvx4#3)=lYg}(E42w5m3_W7+KM^MhRovHOvLZvnDXA zLJe8``np&an_Z@tIV-4n`xapjXOo)`P8F+gZgTQ$k{G>@R1xr&shRO3^TRj{kZ0mnYRHlsa6Ah2MZy!PSbR*0?>DH;$ zQ?ObQk8zhWG`?0M=XABaP~o`^&_TxF!lu4{W&)|}%d}29eJl5rrm;xMeM;pg9&z2z zMBV1T(O3M^;o03i<4N|DrLvZ#uZu>9d-{ss8g`u~SuDSD8Hq1)D9+2$OZ(i@gR4Z! z0klna{OidBTBKDNyr&6OQ<;X{)?vkqk9zHGkh2eUZ7D$h)64?Q$~PTD5#gjS@hv+x zqi?(`_9mY>d@Z|Pi5OIsPr1^Ud-Gkbu;2v!SeYj9Pnf^AbB8YsI}oMl2AqtAQz#5l zK6i#jo`mt93vqUOSNe6ZPIS8FulO0kmq%ape0bHgTJZ|1y4CjhkzYv}5Iiw&)6BGW za|zNC#vBjw%f}TdQv_zM{C$o3=YiI}ccH$nSEZP5$@|8dG-I0NVLKPl&tscMnrVkb_uNwE@a#p zhd9sXKk$m%+0y| z&zc=Om5y!XCE}iU%-FE%^|WA~O@C8oxrv$|`3~Jy{=ZH&a#e0}+)mzT{@vtYzl`>g z9M7IQ`Xi7`S8dU4G~t#O{{C3gXDF2$kvAD{IL4Nl{~ZwsH1?kgQYmNCP!nEH7;{>) z&TYweV4TCL!>WW2QC<6n+j^GVIDr-aG%XG5Wzo-sZ;2tAiKOZYCOv0 z)6^(vi6bUfd7(PPkd&?aP6D8_SYbA*JA(7WN`}m*gE$tdDfZLQt;LfUyzpqP!gvq0 zRxJs9g$r4;dnc=WULYEX@rMX5jhIc!0aqA;PD;NLrGI;ct{~%)`GG4&q#)hBSjXd_ z_I}mcdp^AeNKSv9geTF4P$sRF-_XL z{I51Uf(o3iTa+SsHwgrj=0NH~N%JV86b;eb1A3Daz+t&440>wm!%4 z*z+Q(qd+vm;R~@wQf~*PnbyD3Y@6BhY=}DmL5}h8Hz@`ea(ejVqu-KY3!wfiJUK=4 zoRD|O3{SH(!iGWS)OJ9u7)^Q~NDo3@>}fDrUoar<)Y~M{Jhm8)JOB;{23KtU;2yBa zh{8#8#+0h=vIMA8HbU4%VtJWI(dzT6$VTv^MTkLQ%r7N371tW&<>{klOs;-Wi$27CLHu@cA z4F>!8v}#|`KEFz%dtyJ`@a}ZSaRZ2IRm*5g=_YpjTB@da_IWck+0<_?N{=W;#}N<7xi{ z4!!c~6;*SyyH!-2NK$?^kDx>cDU$-w=!)=*RG-jO4ljbsfBQAE=>>$HUyPX2F5BDqm4)FUF^^zmXheIl}%F0OpsXa&_`l zDM%np#E^GtVSzbzWc6?wzsCVpxTIHmpaz%72|yhfyJ*t+U6^;_uf01?b?{Kk5Z>MP z-tahr1L*Nx^?)&|8KDK=69|WF-l~sjKL3Ijw0cGIW1N1LI?mCDR%_u)A3<>1eA&Ex*08-=X}*z^d_wgSolisb1$Dqgq?TSA`C?4PDOy1m?L1N8 zp(!Rqda_k}&WrCe(ry^aw-9lc2`1`mpBdpekeQ&qM=VcgYnHSj6js-}5>YhbF20~i zRw4d|D-p(U8uBJ-PQ7F8pPZxd|68t?A`Z0oFPiuqNL_DXISCp82OcAEBjY~9#Obae zK@8JUV{Dpy4umXrPMG}rtTC_h>P!2ML0|;R(r2;E9H`d*&qxDoHwfMFc@HcX`F+V> zr}Fo?kgPIYfs7A!vVzG~S!860JQEu>;MH9NusF_X@^`gCwPY<-{3jx?8eSkePK~;3 zhT_9t5Gu^vbvT66>RewVUeS0=`DLCmR5pZ(Ix!|^Wuy!fA*rz?|{qTs3 zt-9JXDI>_bYp{u23-7{3u$n-hn3CbXhZJ%`{e!KQsp78VKlV z5$)!1%BwZedtAg#V%+5c&|tZwAD7|`^WoAZ>0!)xtjbD~t$5l=ZQ>$o>4x-$pv#On zy~jDTWK<${FoiA$)7?iT%S^-2R2wHg_Ce{SNA%Bvkz&W!sM7{IU|;y`_BL5UD=%f4|x|5 z$Ge?L08F!YVBnLhvH%+%`aRK2Y}%MAwqex{&y>t|ekK(q_MpBQoK*(cG)MgphK zYw{|f!J~osV22R03=jmMh_Y7G5M2kNlQ>Y5j@(!~TV>AV=FP9Bxq`HBbNR6diDpwM zUP+^m01DYRo*&mFGdxvt_1nFZ=^!0Eae-`X)z(qbmLrHS@Aw?9(lBTdx0mb(m;`Hg zXk&rH4;JUo!ZNY}&k1387cMR0s1E@3WjOx8Vw#CqpTKorRI1!`Q9*@)CK+B6U9uqr z@kRYTRDzEfQdc)w8PJ*LeE=BJGE8^glv7#jFswOr0nLY$kNrurgrD`pRT)mXRIM}y zz6a81{3ndeM=BJx+WWMY_q!wXRu+zE#yM@YS~#+o-0G>+PRcY4S_HXWh8dIiJ>ne?wHPd)5XDb*Wfc|__?*QFd>k3l`n5x!{#q!HLP=g! zHuSOx%S_t(NGFEOITY#Td99d|Ukg~&{&eR#b9*38A$;ZMi;4uqG^K)OPwp5E`k?-~ zavdOO1Ikqq|GE4uSNydnpmG3!B3X!b3&%l2`KAV)?LUQ#9%Vl3#K~v`5(>@%R5=Sx z85lC$9smT9F-zJ^*LJAV7)HP|J6z-d_c?R_QkrR}?b)w0t*K<-i_k$m)(V^Wq* zrIAlBS#wCS0=w#t^S3KLw)#T6jxYF?Tz+|Zzuh!I~)+zS>xMOuI&Xmo-j?JualKXAJE5yEJ8ePg){;Sf$t$$n9*Kg*_WAg*f z<~**}RodPt%3NNZ7>*eSl1rP&3j|B!Q}#eGXopYP6^1)d4^uz5gB(G^M|=aKbfD`x^vSiuv75K)HXcIAJv_Vc(0@Bi3I z5it>w050RicN^yt*CT^4mw&>KFxmCwO~$8|Xut$8wYWyK#TZqMuu%QVbbKSvKzuh+ zjcAv!YNcD;TF6`cSvbS; z<#?KMY;UpqpE$i6+9=nX*1ReY7KH0KSIS%vkk2qMoU%zx!Kj zetwTMFj~qzGt*R6rBcwsrx#;;k|gZY_EbH|9jh=I{DCN~w)cJ*9<6_-&dThJlHu~_ zo~dUxh)0qE$>q!PqAuJ^<##uJMm-2Ufhw>Do;`%7l#0`ss$^?jtcMf9L5g`V9&LI# zoFyAtJB&|-p$qEmwW_tw_%r~zpr{OD1vxo?#NW7m}R_-(v9%VR+!jF6TK}q0JaxeKc|ho*e!Ql-}F@ ze^fdQb1qGWr_u#*>Xhe0WoHl~hd3m1PgazgMo`6kCg}7=fx}q_1&}1?7(jjMp)GTI z@yS{*jT`Z)NL6c8AAx%2-&ke2XsB0usqrse^^hNE(v%TLgLt~dj&;1ZvjPW4zAhzy zAAsrp-5ed)u7l|VEQT?tS4V&H^QlSOHOg`HqOnJxp>7va#)z~1`53wEFe2C_C1Rn0 ziuiMgVm|AO4VxS`rOIKn+PLA4chQSt@h3&@4J<7bQ$A@QaO9gh7 z^reQQe+e@U^L@1=n$OFi;WIiOJ+%XxnO=AwN2V@jS$4-rtAr=&rN*IT1>#45dt>Rg z0*l>;R1Fb>gf>SkeC#jGQr_oShpJtT4w&wf-MxPjBHz7DU}M9j4G(|1j6ce*-nd^i z;25T&oUTz+7g#BY35Ua?*P=0{Vf^H~GTuoq{}S+-tS6bK|LRrhRt{%S*pF0|5{BA^ zuXXl)pom*mx5F=2lhZM@Zfo(Uk5}f+QWwXh9(`QVo;qloGl~>F!1dS#$pVc+mwO z5vB@S^_Cq|z2jPRR3_!K1z{?K`5L)yDBZm+0wtj9(90?fAdAm-oW8ffSW&13J* ztG#WFm7hm|1|t^6H-SaJEM>Mpp8Q4K$5GT`P$b0KG;YMh9t2{YUmna)-!jgYLOX|4 zFZ?%dgSB5S>E4tWwdh!%EZcHPSZ)nC38O27iE~tbmGyitVA~i;7-JO=(-I#1i}T&!1pYrq_CAZs6RGjLHF9LaW%hiNz*jTU=v z%HlS(JaI3iSM>CUQ2E|j26_2#yO2tyv#HXSe9NXyiY<$v%T2JOra132M};GBH0p0G zcA`*86d2j>Qp(?7jZuv?T9!h6kAeGJ6*MM~$79OqN-4?COhgt-Kr1R^{_>w_F4u|SaALNKIZvJ2YbmZqjh)iwThlcr^9EhmpvF)_DI zY8aqtZqZx2`R%54UH+U#lJF`)6YA&>;c!GzW;1bxHSDcWY#Z0MDy!vW-1UQS|G25&#oCprSKdJb5i$Z|xkH-n`E;`RyIPGwqRAyeI-FGQXRB6Tw2lDThr< z?WTJ)QUr1*`$RlIg@7HfM3jSWe*CE(hmD^^Lz%$Oj$+=>&Gnu(E(7$GZ7|?-b2DgT z&ome5%jdxvYk~*;TT;Xk|q;0{jgiP0oaqFh4oSn6 z>$UJ855rVKd~XKA3(J^026 z|E&J;L~66lOe1vU9o$8V3DrD zDC^d~r8w5v!G%B}$rpOf(LfH~8T!L&Y>-PmlR#kxF&)Yq`t>xc{HGWQY1QEeih&Q{ z`RFYb*YQY2$gTurn3!>IVNHqD-LD#hGDgo0JI(V8X{Cq7iBF2G*fI56%t#WK)-DOj z-}j`Jb`s+7?WDcZ7#~)#HP8E%tBw!0Q1N0KN}9y@%3IqU$s3!`XKdtUzmrbZ5~&M< z_Std-r(PG7cV%~LYRyS5;4J-3KN&eG0Md9h0@&|ZVH}~7K0ti20UU>1!e~`aB6hC_ z_fFDlmjdXf8cGl|;hPcNlu5Rj>HP23s39)2Tm}F1de}q`uw`Hl#o{h1$3$ zEV_QleZcZ%`@_-0{m9M1GI(UtSq(o~RrOJ)(;F!mFGoCo#D6bZQU3B7boAq{ij%qJ z>Q(%n?%L%&G^J38cW9rj92}}t(Ox~7nw~~Wudf3OM&|%Vbh)CbWhPNiT>+jfdlDw- zR7ui33Rp@%2(;|vr>ui$(XV(LRK6;zs(k-yWT6m^zQYd z`PZl~%9VM6zOEJ^E$waY--Q|O(7d^}qM%}r?;B2d8gbB`{bct}6&RU^WaqxQ{<4%G zs2UOax|tV>wyvwgTuV_Gf5?BBXO<`~VUjV4Ffw0=P<-&^s67tYVWR@dg{YkLWluMi zv&#midK*6TIK^Jg|toEx2QfvbJnTC1PvQLppk zGTSEt`fyzI^oU(Sjn?%G_Mz&8C0Tbzu(`1cA`$a(gznBrs4s@ zRJMkbsBGKjc1`-8r^_d?yM5B(sGF=lJg;U+_$@rglQD^_QlEiA2*H!P7Y6J#;k+Gq z3@yt=`PV>6k`LE0Z+_AC@Zu{5Ep^*rhaa5aoSOE?Ve=95h|o<*T5JtDQYJwKa`6tw zFpsG6T{0ppY&(Eql9{J+uj)Z*ceXh+zCw~dAOP;Xa)IC#Q7bZ@l0|>ze$>F|EzLE7Ki@<<$fYy#u@U5^8BN5bBC5vbSI7 zGTv=7nJ4ntaZZcCXdnU zQdRTzH}RNKeTkR5Xq;MQP+YI$P^MF^^8jgH?@z(JD)nUrDt;+yBpjF6^soy7Ae<=` zl-5o8?+~AloCG3-6v2EDoxwC6O5RZY3)1&hy)T=W-P)B8p2|f=CGdPuOs{ZbjmnO+ zRtS>H3FVc?CbI#t3m0Y;N%f!c=J7V)+GR^pReWJ@$26urFp!JjI<5zC?O!mq#5H6$ z;R$1CEzy4OeN4|<++F~X*Zg|rS>P;~HOt@w@(ZgRca(;62#6k5sLq@?A z^C0#n;l2L_q?=W~Ga}gk*N8U(D1wgj^ITUc+T?hnhMsHIR z<6Uu~%EPG@3b-^ZUm7#n6AcRQLZ+ma_kDXlLAh`teV8JDKy+9|3S|;49V!ft!~N(R zVwe(WQ|S3{cXk$y@q>Lmln>~q<0`R-d#7W;0P2TjFXe!u}NDdUhi48 z7|6b+jnzyAl6-BmCPx8xH_7kc9ze{(WCtsYo5Uzj)DwllP&`a`;tDYqvc?wl^P}aA z3_lQOm8dbo1sODo3P8h072$<2)wP0m=quir(b47GO>TQoyTRAZQeOM5hB7x54oTDh zVQc%i{6pd+VBOsvFSdCvW{h7keEy}0`fpb%qSo14JK_c9{No0CD?M~lfKsmxDyy(+ zYQfQoyMg=Y8VnnP&v3N5p)aRW8M=iVmfcuvE4_&S{rh1TX>dAvS`!I*Qy3 zAvs}QX=dhAA|KCI17P#GIxfdhYzLG!BxST-z@r`%3*F@oxTi=&0u%7?o^>id%g&or zX^&mdc6pRc+YNw-um-OI7kGC2-o?B~BHK`=$9DC%N2hRa#uEh@CId7sR`jR+0>)9S z@>d`~cOaa5snAaud+t|;3&`ReHRQ50sysnI^?MD$2*ll-uru15D%Dns24*^KgG=Kj z^3hik78VvYYXoOifH_3z&mK?QqF`BhbzriDUBD}%NC4;mMv>k5J8}v=qrQU44CX$> z^!X-E(KHL>Fp$JP=tIhRMzwJH9dSVxXofWW7oGVAYb)NetD}j&XBJ^|_`$<#gv4=V zHSMR)1bH5 z;v3o8ubGAfg01=Wbi3 zCUA3Mv4qyG^o1fh@%14oqz<(Hv=C{j$Vx+M!K84dg4;wY-IZ2PSM`SKCY@4OmEeMT zS&r5!NDWD|N27;(4mGL}^5rwigjxU69-}dcd0gxl34dxxZ;X^2csGja@hL;MW8287 zTyqf7+FRUklyys;Ui*>s&~zd+zT+4X$XWh_Ox#&)gNI+4`1As^Q;fAJ>RUxc45sB0 zmCxVbNUhJ(P2sV#85t?8U}7jiB9W%*>*lMUrqOgJZsdL3i|ScDB%mm7sLf&P=?RJqx&{jwQ{^XEMO$i6k(*$2wBkoQK$5UHE+^=n+kM zfn{2n0md%mCw^FcZ{vNk+RgVQ`8`Lu9~neR`(sBHsTzdwDxJM^0m+xMBt5p<5J`T8 z|EOg=r-qv4BoRd%pK`($t{ZBrTn8`Q#U^4>Z}&+UuV`v#K3$WwwgI-u5_{aS;7=X; zVIMIyeJ>|JHmr}hBUS>rl>rAI;d?}URy7i(5-(c-bodp+7_Gko+1E{H(*^7NX2z&bf&i#4IwOn1ncPcoISxNwJFUk6&GJ|4&4hK0 z>(h?^o`S+*DrBhflMCx78Jk*b&O`J21~Js|yO=47hkENK!wY8%p<$s5mjEN|H?KrY zpJ{nN%TnjezoV=dQ5fn<5|)>0%Vk_q%7SsT)SCs^q)}4B`Ht3Zal+8bPUI1*bbrq- z(r??od@hG1`H91D{4ghNKae2=X_&P1Eu5L!aKDAsH;j~jiX_5Q`zISx zMf&9>Yv&E}GCZTsP)!h1SR2n~Fc2g3p3zToHQq}cHojHPfrbQXo?mlRdG_~?@%T6^ zLe|#RaIX^FZB!hK;)>hu^7h2EB`>3!_Y%8Mzcbta(D?(>^vVzh1=LdgRxNwd>6EE` z7=MV$Oo=bXJSJr+rqmf+XG21+;#cN>BHw^9JHb~)_0VvI^cy1^7}Hlv?f#ocC=U%l z%_ZUGwfBCv5w?dO0Re|n-(dq{@9+Kz?;2xhZ{PpUKvCFmNBsF`^F41AvuC!V*in`Rm~1^KX|^~)1sA{wEr*mY&ZjMYBo zSuE6Hx+xk^;iB;$6RqYUun2XeR$NMHm-f*?;~17ix3~1(lNi*J#&jFDY7mOMo)~Q4 zW3L!~zSdY2(|yfC{hDv^FKptj0J=XTPC4h%+9(tvO~ffZ@qbHKg+T3a1Q3wfWIxKm zyTM&WA%iL`Jlj-2*78v+a&71=EinC4c*bJ+zhTH__<80$Rie35wQVeraL-| ze%9EvP6{F#`ZldA5ggiB1zrsHt>zJ*Gkxl1id_^l_$n)&5^mE*XBfr)h8*tsP4@-7 zsW(zDDIOnEAF*vqR;1q^0YFRUmj_w-V5-h^Q>8vO!KjFTR7>P@Dt2BSM>))ZhVOFp zKZ;%Y@9mIMfjdV@qXT+-$r(OR*k@G8hK9wFD(E~D+(rsMwla+?!ou?x3GRAX3e98- zAlpow?flq*IwRvKa*v5AL||?B0>zN-uL0$UXr}EgxL(Qo|JE+2GKpm6ntRXiD)URY zf82=-?8jvhQCXps)-T=3C8Kgw3urPB5&jQooKvjctiL+mNaHD0LSkohx?0%}SWO_F zFy71W zj56V|OL(##z`sOy+qOTK7IITh25PwY4e5aM2zo%;IHs-_Pfo?!q*&9`Y9BJDO%cG? z8;c8?K%TqVBg#>81uL8{or>@`jBcM5r$AolT`Sje;CYR4MJIBQ+( zZ~D46B25QScZK+8Ua76$pnY7x!(;z}F>s4n1^ANzP=DoKD&PI!rWL88i$SH`Od(jL z7UC$ebB!**_I*FE=iPteXrjJh-0dkQ3lfW}HP3gNo5B{@UbXVxVfW{_9;RxZZhjlt zxnIvpk=Mi(3^_BGFPPVSu8o!m#h_=khBsXBg6a`kB1Hjy*qozQGmJTx3m2@x)Q_rW z<^xf)BILEty^oUUbvJ4*meiGGckqZgS?{&pLcjcK%i3)aZCjFC5V|c^^U*g3fXe#BYq)9 zvvN)G-jr6YriYiqSiZi`q3rYKTM@{m1 zg?6Xvvo9kO^zwQUTiCQ>!kOj4Pr?e$UQO$W+J6NG!8h}%0GX9^yA`geK4?XgK+~{9 z3k}v}w4(f}JWq0FYu#kAX=IR8GfSD3NDZHlwViQwF-ZBDl7f;BD)jSsT>y7|aGeOp zjsXuf8r`c^M>gNp;n4<|;*??_;P?yZJ813|U#1+oR~^iXC4w9+Zd^ZdxRUX zO3d|*6?PoQ@JCpOyUF}HP^!mc%M1J>nc|$O&@g#MOuJ+h%5=0j-Knc{7_rQfNQ|2KvW%;QT)^=rJ* zmus^th){H&KpRqMl}vY|0lYmbV$BD2DFv(lqkp(La6UB#Fn?h6o^}0Vd`&+eHZq~y zx9uCGCa%wuOJE9M2$YC51lF$^Ftz@B4rR{DpK1Ma<6tB@^AUolxh%?B9=ik1gD3Ki zCc6}7<3R`?W-ux!O!z|Ho_HS8)&>1=Yq&SlXQ^S-rygim%Ayx?SB{tU)F<*ud=b2v zkL%MvuHp8wSFFF^$O=p!wQ~VMc9_% zD&AtT%+$ci>via_gBYv}+SnW*wqBO-w0wrzi`QvK>M+Qr_pF0$(8jRqfpp_Y{Tf+h z9+~qO>j#sstRj`9#zfAHEmc2|r$1jYQ!rv%JAYk*%{vZTMF&J_8~LIlIQ_$qS&S$2 z(|-;>^Edpv1ry%N-v~>LDas?IJyoe6ttNVbxD8~rR$!tEh}Sh8Onl)gN@Q+pMUm5M zJ+2K1_?-jeh&L31U`*Mx)MU0XSxObql;f$bn=-iGkb_h=y~_Uy zPy~G9_!3*k6z+%i);}g3n}2DV*kQVBGBgZID#SDKX!O*@qiNGeNW1aucrd5QfBk{A z3ngxQP>Yvy*N>OKR*3hxDgwHZOzH&fw_49iwSCv#`1aDP37+h}^ZU_-$E-Zxzw}Kq z;WtI@StM6n8L<_!YOA_>FMT$L@Ax{@$ie#G2}-$5t~A|+3nX#MhVrT(^I`Mj-}vB; zl4KR1whQ0DbhAnY&EM3w-wh3f(9n`)z3$A`c+OAyU0X!SMmI5q2P1E>C`*nRu*o(Rl@;NNM6uP?7fk7Ufko9=!GhcMY0XB%}^* z>`OGO2wRxCyuis@0w=mNp2*eMd`Z549`l%{p}UVj1^Af1zF51$I9mB%-X34P>X{4I z0fIHu$Fbw0xf9CDdksS8;JqtAaaA4S0wNJFl_keqb$-TaH7^Bq$02@#Fi)o*zTwp7`2Oxc6wDj%sLlIu%%@apgyBL~*QRoL zFoO0~z5jp*BqGH^JDGUkp3cFP&T3!%Ao=eY`5}?ff(h@Ja^aWeOW*F^kSu_lk%asA zPgG8sM3KqhAz(HT(JO^p09}|26mYG$k44%mS?Be5L;S}9V}*&fC86xg?k zxPR_r#qFkcRdqnpqK;ZaS^Wuc~44PMKwtI_a}da4gg?ruiclcv=9y2k@u38o~`e{XZ)uruzA zM!Is1T_j}$+f%D(r~%c!NxiGBTxF*{3?r0g812XG8UTnwodR8O+8-nG4dp$DvK-0rE*~i)d>D!i_(B8>%99 z24rn}VM28lY<31h{EAHZvh+CuAqPL)^}DZS`_I4wTGtmt@7IXaENfwBuQU}S$49r* zpyitCC@_hM0PC?ak-gPnHPbgSbXf*F1_%0|3vFmHUiWE?Cb+X_#h3%DMZ_z?6FMVC zM{50&PimeAaAyG(BLCc5U5(D4`LZF_x8yO9&evZ7b<+C-{L(`&g^8&t>Iu~4=e5jC zMriAMyvZ!BSQ*}CPBM@ZFwbD=Rex`JNPVnAW$l~23J!YAYf4>6>jBn-mm7%Qgf9*? zLPu6feaRMbeY+KJF?g~y0FRRahLTEp9fQQ<0-n$@8E5EjNZE@tI`2h2R%&rM(21xryV%;-E-srxicL9ap{ zw9l1~yL(FVmTBdh;YJ9&Po{7i@0ioH zysSWq+Iide3m`@gy9!0YG1k9nJ_B=>2WIqQU~Ki3>wqwydUn1O)ik2IyO}?1A7mct zHL8$xhm6e)_}jE8r>*xwV}Dz&(xo}p1pZ%NU5g)`QLAFzuN`~5?VtZm`Fgy(v~eCh2t^L&ZdD>}={Z+pDKs3P z)zHEdWC^W>DjIuW!eC;4`~>gXzV79A1Lz8W;7BKJ0kB_k%mqB5NSopRyFEK2c<+Om zX}=w?ArW!n!ts57AH2g|2QWx0Fmj%`n=sa<%A>C5+mVh>*EeAuOK7E*iqNS6YPA!+ zJ<5B$k9TqZ2U~9)Rdx184+DaLNY~IvODf%=bb9GV=|+%l6a_Bb(n?5oH%d3sja<6B z>pj<*@9+JscfD)5X66r_8P4aNC-&aYetf8|mNMFrI)P1vk}aq|ZDs`~vR%v?}7 z7;m-(c^C+asVM1T+Fe$=drXwenV$haDtNqJ9g`*1EY+lO2eX2Sws%k0J6Aun`1S=iiBl({U%outlA)|0m>zDp`d)?WA^E)! zn9mjIa1{i*N|i^DId}o;TNf0rp~+OrKfZ_;T>n;IOCfy{vc@dd0@e=Lr3W0|Tmyi& z23SpPCFd7m+pF+dk26hO9GT(1 zA}xDWA?F7>Ka^5*^=fQGn8hz+gzj!GB1u)0OLVLI%IV_cmVYo29hmDz07A5TK+DK- zeaGm z`Z~ck4F6w(ER<0kvz#2T5NF5?QI|a=LJB6K9C_a7Soz0 zyeF*d9&dJH*(= zGixX|Y)=&G0r)f$2M%W@7(^GgE-esr8g~}0^zzQ@3V81s93kYa^w+fmdof(F&sc9* z_(Tz3rF%Fq)BR$9K@TdY>26UJ|H|)}=>n!u^}v>gq!m&uh>zF)B%Zx7N^G%sn)ge%|scKPa*g}<2PR_sC8PChf$F-h}Brr-T$)ED?cRqcb6Xk5l!uxbvzUD-a=Ud$-E3;Iy<-~Q5wZYlZs?j^+yT!Sf~s)( z>}c0Zx6&N5{5aYDMQB$TiAhPX@=8~D2&zo*6wQa!;mJqv>mu;~xh{oj8zpppXv#=G z9Cij*c8fX1h9b_LY9bTCW3}M0+I5^3!UiKGUh28Z6gOMJKIv~2u*Gi61cqb9LDILU zAyIe>iU|<)alEb|OFK;i*yWr$KD7!+ngnWz>;{UuHxb#fj*lp&ITNPoc_)hX63qv` zO6+zeyK^#4-&VFq>5fVeGbc7wNPaPdQTRG*tV}msH4=^@vX2#h^=hwe}OT zCczjm6fQmAdsyZB(o@t99*eTi_37^*NG&Ip@x$P=6^5qAj^lOU3h|bIE{WEmDUZ>+ z8xlC`z_$YK{?G*=TBX?Xpkeg^ipmc5LW+&EfS!eN6B!<&tl{1SOvKGFt|4kez2xaw zqP^+z*9P{JMM*Ib-){LoJWwkj-m?~)6|-`%a`UpBE_+sOGZisgW%U}Y`Tha^nYrbd zER2ypFK_hRb19wZCK{RZYKUt^giU97j4>jev8F_bOY1Epm1LiZ5SbfMq+hDW_c7O} z&JFLoI9!haad!N}_E%kz!cIx$87?R74G!SnZEnuwSD;(2KR?R`?TnAsiz3Meg3bhu zVqG4Y#fk`)SJvp+1yo8JE%`ao-ut(Y40QgPGob4~dFI#t1v9kg5(p9BgH7w_j*yRa zC!=e?gjqRx-XU9`^gmU^0ff@org zZI4Qh=&{Sc5W4jXKb!6t43sJ-()XP@%+{n;sPf$YzZJ$cyzI&v>z+*qNZQMjb`-_{#g>x+_ej)GVyBk#QBvf=tJy-nN1ZQeb9f3Io9x>t7pigz>F=qidsxs6Xca{am zN)+gRP6<3U?FPOY1?s&pl9>o3ch}k#!wr(Dh@BA19t7xJ_j#JzA_)(M*dii?E5dO! z^0=wx1D*y!Xb+5NaQ}&l{>a^NKgUb!Sc6>nQ8(MS-qR?|u1+PXeWL#-yO#v%(y%Y- zholseipD9;m%K|qpdx>3qQSW#38Bnr!Dc_lbMk;T#yfw4z0D|KmKA-TO{$Du24+U{92r1`oKW{Oq0R8OVk+3UAszqv*kVT; zf4Cp{Eu>-AfGZS<*Dxw4Qc`Cyp&7EnEPoE4gSrMSD9miFj=k^x<42_amhs4S)kasl z9k76*Qt~%P09)9gZl@t(gDp?dq!*Uk_u~Gj;={(x%urSc>xx&`Z{ZSD-6 z_m6G>_<{cr^&$y+L+{v5K)G9z+lfN(P=^+YclOoqeN3kF&pOfkyMkV%lYV78=U$1$Fvw{7^neZwl7cMiR!{>8?WwHui z9$}#kkL^wW{lvcvHjW0oj4>YZ9gSCmElzRQXFC?KPS4yVG@Xi1pg_WO@r&HCTDh(s z-|K4JToub8V_imK#w_K^)0c*NNjbiD@1;+CGnmX1o1mG{@Fzff?G@x$csUSXwnd;q zL@`Y;e)?FRZR1&qKzrhKPr~()V3@osWoj;{AY)iJs7qeWoIE=E)|`9cQ8P~HWR4vm z>SiR9!dsHNW6%5zD>sco_9yDahS}o!5vPOUyx+C{qmyF9sWH*m!M$Bmeerp^{>JpF z*P3^ZfSo6gH(x7sQqXU3wt~r}Mc9;MWm7Gm#aaE%lnbFnh2V}fBq*{*zU}gAIu+TPH11}>F3DZN{pErq%LXDW3Q=eNTG+e<^#bw$tFbmg zE}4(&8sS8Pih2KWBo3Eef|v=L;SN^;Yp*tef;2#!-mebZP)ZF_G2T6UYyRRN4Qe8UMiLJRDhXu1kMi6>b;zTFhl^d+!5cLw@hCBqm-Y?F;poZ)J^Xu`%qm& znuAnRR%j08a{5zyP|lQ2U^oklU2lkMz32}lJCzNnxV+w7!PQ~#Fz+CoI=sHitA&{b zGQ#NSM?j%P%lg&*&zLqleY}-8-jV0-m-srcb*9V~!`O#@(zKyC%Wm!F1K9XIqsh~g zvW9Hmy#FhTFIea2*<-{|*sm1DNFnoI4JB`$z@LP_xF@`kVC<)meK;99yi~CCa1v?$ zuz_WBKcBSCF>(2gNtx%}DmKzS@@YQ?)YwhA?g`S2bX#PArPy%~uYFQXZJs=tj?A>E zJ7eE-$f_hpFEbR(;O{c6OlTQ}Cx{GPNm6S(Sx{fwQL9!`iB5A3#7 zv`Vz`rjuVo^ryK!=W(I1R}9mS5OI18kY7T1l{z9Mw z_?S7JhF|hgyaspVuI{w5_p#WqY|XB>f)+teco8A(6DARD7CqN^GW>THP-C5x9>Xr+ ziEW$^fFZ>g#r)a6;F!%bd@Qydu-?mckunkFB#=W;TA9?ikN!gRYPX%YmhQsQZ* zF|pf{itM9FIX=UO-?dft09%g@2}o+=1-12KBsts>4h|-0?5`k6Y8-7O-=~r{(K5hA zRlZ>z2iq;WYpr=bD+d=)Cb%eXM7b0kFnhQyv4a% zBR_~IhH@D<6BHw0%TWgWE$J;bvN*LHx@o@5T!#3Xu~@+ zp`MKtM#5lv`;k7YibTk@Xd+t54=qvtcGF=krWr7AhC4s%w1}gHvV!{>bo@ukw zVv^TX)qkkE!&RRIQ9ih7jPLMo(czRZCAg*d%A|w6YC}Da5fNhd?(v$7{m055Gx@Q~ zxD8wWe8f;}yay9w$|k+{1SnGFV&tEa`@cdm3NBnTKDMH2NYaC|{XCe}P4S(H-Sndf z4nn{@eH7b4lkq6>?Xv??qJcEIPpGG=j)tBm7{ExUx$Z4x;)IyGrbI@-c$3*prg_LI zC`-|C{t^Dh$o5qzG`ZGV2BjI5K5A5C+kz^b`szf;=t9+z5LsLtzpi@jvvuv?=FZZi z%!RQGGtXJViQt~YX$nH4(MA_5l`-;hI~%Mk&bs$Assu*kf{=b z!Fo%fA_Q7PBW(lNPaW9Ls$k1qte6B=omWlLOd|J}E8M^_3_n-D-ih04l+Jpgp;{n_ z;^Ds%{T@F!12io6vbm{FP-u_5(KR@kl6&21Ybo178_u^Z+Md^2xm#ehBy~-M*2da3 zC>$i=g0B#ZrI`j77LeoVhJT103f4OVNDf!;w?N+?o}VFv`P|N%Z?1u}-JaYgs3sX} zzPO}fh@ttRmaTCS5)cH+gz}fD<~%8{5Qcd-;V-Y{9H()4U%tn)7YrlgivXC8x=&us zafDurcfIG7@Xc-|yp!WEsa$(jZll&+Z!03cL|T-P!sSJZc{TM&J}=kMJ*6+c^dDBl zM-1WK7Kffy^2{|%42Ihur_H!~y)+jK!6HnuBf6S8Rh4w%BxU|(WI~n2_Txp{>9X&h z*gbMJOh3#Alk3-M4-7&WmzcTC2dG_5l)z8R4evhad|p+x8PaWDEcr2dY?K-6NeKXd zQ>*bjNW^AScF;a3Yci$azr&6h`>tq@}=F?w&m>n%oO2l7+EI_Oj@oPPamkz=G?YoFHO76CNz zzc7N%nq_TR(YA2zW&aW5*S zkE}jBawuYZJ8?=ELFu+TUeD4?g@gu5;#JOoB99G^-xDU`#vE}tsK`#A{L|NvLSgfk zD&t6?;}l~}xb1n}|EX9g;Qfy1y(Mn~x#9WFc)rOU7)jXwOx^%q$9YVxNQ5@)iGN|(he5;m@}kaCq41$~dBwEgN*LGp^@1ylF-0!0Cvn6eoXQnu zB0tk9(k{=O$dtk;1deE$AQ-cyi{tX7$8|qCp9J6ZK-aM103n+8w~K^SlxhFiEuIJH zK?!z*o9y9xAGKU(TycGD%5-fH7Df6vm1fOg?xzk%x{ULg+OWA+7SFS7LJkZUgUPCi z1AhSJsirA(WPT+K#lkR6OI*AM>XAy*nXtpQCwyup5oC5OMAheQP?gvczlVG4kjO)q zO?oN=VmTX?_E?kM8L8b9WMP&B+amXxj{eRBHa(ira@EP0hS@~HvZ)}#)P&RckjIUT zN$-h{j^Rtcp0cDf5ppz~0SqGLvPjXbL#4{M{GZvR9^WFtqd@x5_&MqHL+7H7hz8To zM6-+QP8qU)&`mWzKOv+!CWvGbelE|bVIz)FddRt^Jv$9L*jTyr-P490-LpmC2R1hc zwxq73p~FPha^?iP7F3W~Lt);n_;37hda&UY$)}yI+d3+U0?whLcx%%)rt`3;xDwPX zXqPW>_GBn^5LpmjRph)MkZ7@dBz;Y;_f^~WXs6-a%Kfat?SRq+M}C9Erk{wLNUuK7 zB4~vGS2q3?AHXBgUeDXk$WJrtRAg^>=@K-+3X+R`Ve4$Trcu|U3LIytt-ijdO2*9` zdJxTi($w-krE>~u4~1FD*A;8@CqwTJP_c+y+3jy&x*5e!@r!l;tYkT08sdv^f#~3CFO2-s3`DPYr_p#&JmlIW?V&3} z^gKqfGYJe^%CX=-ouQ4enT5;6JKzI)O7o`MiMBWL-$z|t?QO4Ys;$PP7i20*J$jIf z0jn_3W3^OXdj-ZmtqnjlVn|bVSbCCSw0pB#FkxK# z6><{lHd-t!wj{6{Gikll;)9IgC6X;%-ejIwKFL-)Z(Kg7)cL}YqT)#Kdh$>8EU1u* z2(aC5SMY48#2V4c(IwRg6l@`RTNc-#3Jy&}G%9C#wrA7o1tEY><^CYQ(u8kJB8P5u zh7#)mH;;;tv3%gs$qG}FSlocEN%XTE*b7)Z=(YMb)2109_Soku>4E^fxCQ((N;(xD zYwvo_Wmo2gL zkJ@~p^p!mcmVefWX;^C>juTs%KL1E;>fqGwU$5yZ?|Dwi>^u+b(UW&$^NlL@ShfH* zn6-+tV|(>uy2dUgTB>L)$EP~=n2p*;&%LZxppjAQD95MsBUtJ;(_C`~>R7?1s8qPg zOrt)7P>&>XG*F`m^u_2WKA!{bBSFMTZrxNZC&`{hOD+oDHfK$r@c%s7f!C9d=4Ej( zk;U8ikdnMNl8z|PEpa)-A>@%bC7;C8|E;)Bik2Y@sf>TR#j8L_6}D8zK~|M)^Zs)Y z`4Es1jF#WWhfAVL;s$yI>lc~?gs@|6{fhp!A|nabCHG>_S+^?Qy>v%-DBdVtQ%#2(SkE12*RcRha|$e67DfcHk?R-7ONX8MsGX zRJRyrN`@Tv<955Ed{ZVK?wd?!NXXPqkR7>N0Ezlpo1pKalEl&`!6Gooh1 z?d2R$Zwi&hgc_N$+WHQB`f_j7K-4SWk*m#OVPe=dVCX&?cH>l;AT5GcBn~^6|xEah4 z6SJ-Xd<>UDfvkRcR5%fQ24qZm89J3Y&Jlf1*l%Dr!PcjHgoeiw9BMfzp`Q|j>Xo9< zY@(yq&q?5VAnJ6`A6RKAA#A(J*;(x>8Wi83_*G71$>&qA>x?tUCj8b`=1+h`Vw7C2*Yz+aG0YCe*>N}hg;lZq3BI^8Vg506TjXWEmPjXNpEf)hCA~og9*%mwjT&c{CS{4MG@w|7v zg8fGyMU=Ruj-sBu4hySDu*;N)Thk3-A}Y`HR+O5qw1}i9T2XZ{NMG!`9?6W0BpQPA zT}JcaL8#GM8_(r6AnbFmA=`M^=p1K$NXL05Q|ZbXA;7ejMm+LlhN^KiJD2ZNTFAd)M7TYdI-AM!2zC*i_lgWrPuDuhKKu` zv+uLV$Jpb=dVJNxJL6+uW`cZb@1h^LT`JqY+p9AK5oWYSI6pu;y11_eaAhIIofJ-i zRL)zYo#Pqtb=xfyj9yzhU>RRQ`9)ns2pRY@>Pn_7xRswy@UqOq5>;yKOtlfaAGQ7WV*AJBj+gr}P#= zURU~r3CiWzn5l@vMEA|e5OB(zKPk~xkq}Q-%Fy}+FQaAH%Ookpsqv8qP(=wkliDIV zC8*7iM{0jOwTtFT=ecp$2=vN27*VQi7@`TLjiQIWNNtplagmtaA&chsI0E+ zRrp}+Ksb50+>SpqY=flZkX&4@RG)c?i49ZL>o;+e86uir_r>{KId?tK`!{qG|JLnl z#}XPC^P#_>)O#;ecM~zS?kf6*V9u)Uz+tB8_qK}`-)SH6%5I55YRE68UyB;{%a00`qy_hr5Y3-^jTK>tdr?K?FP$g+LI7UFB?9_01M^ zT=|Nb+e%!*IoXq6L1^lqL)RLLh6@NiPd0`V)#QRC2|`q5X`YC)sW)*`2)S~fc>p1c zb47OTZo(YiTGqlSFh#`J%svTAd^0IMY3BsC2un&5)0eiePp;3#c7eG`>!HfRjbP{B z%5k`CB4>7yUTvXLU^ z+8^vkv*jQH9cl!jD7l)O;gkm4QKe05uj0pS~&dH8OW;-Qqkm^j%r{WFqtQ4dDaZZAw|OLJ+5aJ$%)fewkvU)g#~&!}L734@#r`dR&iNRu5s-L6tz-8B zsp00_&q!T9WbS^kSGUIWop{bbn6@E7Cs_a80%-!JX+PfDH%$iUFMiG7+`qCwB~iGf zq{vn{t+w<=x5?enWQ&x=5!{M2nI>*mz6JPV2IcjBz+W~e_$X}vz~?%~!JVOEvdVrA zOt}p&Cr%uF!WOlJ!Hql;iX8R7zd~_QHYL!($2@}PX((ZNNqbJBVR9Z*#O2ZKJSEA$ zr$K8-Rj}CYHt(Ka?Q{j3#(m_D0)_pO{tfTr{zeCX@qN6W1UJK%R!H|~PMAPplKu0M zXFfki^VMgY`AkFo7m@wjRLq6{sZk)6lAhoyA)`H|9Ek-0r_`Wcb=JqlPk6FhlxP1F zq zH#NoMWpZ2%CyU+rw)Mw9iG?xou*i7Brd_I-LkMfRomP8-RcDf8weNx>%kWw|BitpRB?$YR+obv>Xt|F zhmaqVwp6+7XvRyLnoUiv9Lo62F6;&>dlg932WLV5S3K&!}*-sEENe zS4k;_ccPcJrqW{g2VhgBQzKnq!00UdrAb#_|5&a{F5E^*Q@#7jZmuT3MhKXfQfy>V zz(>(7SKZGzp(U9Ljrxfi8l>|X-LvaQ+Ik9v~yyk(IT5xBQL& zKQP>pjMVC2NquCx_tc4K76kooFs`lTcys^jT58Yrw6uZ_0WE-R@ zj|eS06@eA17V!^?zqmBoH-|yA^FRy=zI6;P<)eXbqUgE96Uc^uSlln-=GIIW#>qPe zU(XUqRcINK-HAGlrTY35VjVz2Ee6uK&H5_1C%0y^)n@g&9e<)8$fXky+ZPbqUoCJivtI{#ip6m$^%)^A|{P97K4T= z7Dg|@8H>(hT?^gGnESojrC%I{Cgsz6Q>7WpjG8k=!jYs}%Fyca7VHRq7+U26P< zyEb{a-^6|7O+?iBIUEoK@BXjQqqpc$#x%6H^(2oBjZQ***y(8Qn3zaGj{9+pzkej4 zdSNu~JBPH`RE2>>;?$*R|4L*$CiwGM(Fg(%t|w{Q6h4ez$lRw$`<+EIpVi^3WTn_2 zq?x0C)Ji1|HICLT;?#j`#Imo+>YYma7e~ETx|MvQSi-h zW{AaH_^z&o*G-GF2V_3V$9h$k(W1__cX^ffPM15jI(8#{7M+>&#~Bp7I&S&Nh#Y8g@-r zv1;`fE>$A(pP6-K(WiB^tp67YHYhY4T0%+GX~^^p3vKoBzZ`x2@ANzrG)p93ZPu&k zpafJs5Fb0x+A{fH8iF7<^9lsHnkseKq3&AJ2%zjrfdw5^Wc zeCuTQHK2!DK#K6ODFnxvwMtTH9eTwksaOO`HOJMbd-XDKb&2U!6NEhK7A5-aJ4b9-rVSX^zj#l zPEDd*s4>Y@B9LI3csa^4D*r`yomFLY;3X&x;Jk!&+-U9nGx^zRBoP8qpZ%>qHxy$t zX{f*bhwxfrfpb|!19~5FzHFp`Z}_01I3jT&!fA4TIHZ$8zFAvL2a<=D|Z z0q#L_OkBeP8{|`UPSM4D8jG3Sgl0K<+F;kb3(%Kn1KrZgW6RG4wl)(5X60rjoe7=x zGupOxf%y*HX_0=vw*57Y9Xr%7?EtjwRO#^o z<}8j1eercbm&&>E!DZt&sy*MqT+*1|(6+U?DukT-{a-HsTlLdbL_{1}6gVmU^Nici z;MxClLw`IQqR`L9sfp1pmuE+T;x2%G`)?z_O#B<5WB6fj7|={m@klpC8 zWsKZllEKi;j;4yN#_)SD(W1S{BSev#>)O^xN$mXK>I;n2u?BT*k*V$|AF5E`N$NMVSY zOfyL|{ud9ne=BxmHbKveB(ILD`tR@0$r_w3?v z>4hRJLU&%R@>o>3V1+-LH_nO*WqD{~#a7h}Sfod}A8wpTm!T4DUICmIO8(_8tYfBKkk< zI((O!3k^BoU%r|TZGid_n&~6>8IQH_UCFR^A&&lmo@#ALVbr z0nfVGGKAhnW5*?GL2cPu6i2((l@QbO(tftyq_Mz0<-Geh#f5M(w#IOZAeK)B6K&o_I$U7iO3JHfw1o8d*HK3egVxX7 zVF$EQ(UhBxA|VsGff(%fECxr1**LZbTcd*I`@A6f=9H%Ogn6xGTD)?m7o(S@3Z6kiJs@D z)LLh#ZV?Zp$bGuGP&FesC6OJ}`%%*rr};SPlQ7 zmpVy=r97irvNE`W`rwdMO_NNz4@rQz=_TC+tcVM@$&V$LVbbwfYiXt_PWw9CcwOb=T5M?useG0fr>A8GUgq zGAgbAkV+o0eK17^&p`g|I)~U8#wRLG$8?XiN6ZEdVK%qg#8g`!$clCA!%)Q<3tF|0 z-ctfzzz?ZHb1p{e#OENR^U3~<-$E%e5fUOD;9sal2(|RT(-o@c;}z>ve(_8BGe_&M z6z>$x+OPV=7=l-7)TkRjRp!VQ!?Q%jW0ihtrD?eT0S!|DBOR3 zHpY=-V|pi9PvvJgCQ$=n8UkAO$F|FtMx(*+UgzsyJvkl6ILOXKn-Z?ng|PB??dSV) zdvy^AZr5Qs&!zbou!M$!OnHy{^{);=%hD%GXx}4n_L3jN_pV`L{36|t>D@v7gVA`E zl->ifj57GmCCG+eTjlWh>Z-E6v$_Ui*sWxo>`NRJ{W|; zC?Dk0xzptX1>2nqaI@%+)>@=!;`huqbmm3SrEUtN!+4NPF(vTM)|z zaDh~pdnd8C52{_d!#-Libd9{GRAH8q#l@&kDo;7z3N$@@p^4hIG62)g`QGu1<|7ye z>OHM++lv&4zCYMU#rWJw`B(dZ7LX=XJJ#N8pJS)TH&#zl4Rlk~)f zr{-Z2oRaWjC86!-BUDQ!uir5NPT)LE*Tfsf@wA6y5hdRVyN>5Zd`#wbP-4@i>g5_UFh51g<=@GS>=$rl4Mr)TPx{BB5^ z&$=+SS06=+$ebbl!JB*jECPH%@o$#M3}JUrpu3bb8{PSC!FjxpaRxR1&i6lHKZ>(( z_xxi}o@7W22#4U0+|nPeTtZIQTQfA1`hu?G4b=F=Z-I2da)2)*ucG9xJI%RoYvW3f zWc4wCG0=9GS|bhJuix>Yu`%xNTo9B-PZjhy6Fu;1JhjAE^IO1w6D|KfOD=<70l^dTsHL{aV=<|I+Sf zsjb1SSTe82E6GV7YYGXwNWt{xs#L}JLiJowsX{_xa%f=7$ncwD`Y6C|SJl1$2WFM+ z)~g!I^z8ehPs%sh%o>F(C`URQhVq?0=f=dQ(r*Za^ek@9i%(?_^yIn-!L6|7)vTW+ zL6iYUP93#|{6%;saeW{)88U1q@SQ2)=I!pL%GMsAk$P>%%kGRm1 zfU4=Fz=@vaY~T05WV~UayyaOG$#Q03LbXz);69z9!Zy`rhEG;#m^iiMTN)LZ=gJ6O ziSyYo2ewDp50xtjy8k_i8x7i}ia3L#&)tj{o!B(}<$~A^$4jTv*fywHsO4fR3AfSx z`p~z#n{%nb77&ArxJ#ttb&_bHCJ<`4%HCgm1Z{VwGBo}dnbpZUW|i5AbPly>8zoof zXHQI0wZfWi)k-2THJntjmMPW0_2CVH4DW%SLyP!Qg10QalZFS!N9EHk_gU6`;;RXEhKu!c^ZvTs|gb9*rFintzp>^{ce?m;mf2 z($k0=_$F4WM(p}?goZ*Y>nTo;71Bz#cQ5!^j8z7kWCF7J-#sB@k=|(x;?P&Z(Atd$i;`>3zRxYsV`a}87nc%!^{+j3R2Z3 z#BNx5D->`MA$Y?{BJu0wPQ4Ajd)ox@fwmycic@nTo9?~4viaR0qQYE~Nb(FxotdCB zI=uY9`LqVGi#ly{+x~%=1lDV)FaCpF zcCj$J%6VFp@?W!2bI?l^5(4|Y_eDQ zUEVqDhUZgcoAVf@!IZcbfB%>QTF=)ZMygvaxfG@J*@{SN2@K7T?HXGRS&FR_iG8yZ+%SICx3jgmq8yRVglS>ctOgY$o2WD>_AKU>^`{Z1ZbT0jinY46Rk^&k0Y!LFL0Hipn~i@i%e2_XY{k>z zEt*ItrvQJ&KQZW(YK)P&Gr}wCuW+s{JTkm5hitgt*;@X2eD{HU-Y)O8Ao}M06{_L7 z8Q|CoHk~hqw~9FWkibLv`zu%p0W?sylm~98#oDB#x!$L}TKUq0w8k=Bgz(>j_9CFZ zNqwpE!BbrBs~u%_g*=2caAP1@{^912l5P$EYK6iOkM(G3gvhcwQv&j1B(y+Wur0en z6(=-Co7Igzj!7p*dCy(cR=RPq_wDK-*0}J)Q8x}nK)?159CscssHDvmE{UhWQLmQ$e?q6 z7Ycq%vNKiuqvhiEbuUxDvgpygo;4Lb>4;OOfN0bly5B&*O8DKS(Io$-InpflGR_5R ztaOo>FIHi;s=Z>YHZ+G?GF$b44`y){Wx9!wkD+Z1Jgs9KV+np=Iji+U*=`7f@VflugRr4UzaP1igTq0FUae3BV;;fO?{buYCYmyn8_jCv z@C8q81mtGF1S;WJU(&*+7}iw2{wk7di%&>G9?H~Y*BinPH0o6FUJ3l-l&6k`!ooD? zO_n)_9!4vl<;a7wBHyAlH(!10PwCcHxo0p)s!=P*9d#}}_&!Q(8D3|8&h8<2>-=P{ z6`GKuWhll%ub5A1TvrO8-AHb}=isb0rU^-u{n$Cb+4pzs#y}bx)m9L%ur;MK7B_J> zKQl(bWBXw|V-p#IlQA9*rAqL?Fb;bE63aJ9@F;53om>Z%4Zl!oCo4~5&LmGG#U$_D ziut;*t2nh5de4dQ#grA@9AQ6Eigh1pP$qdoFBJ@r*HVI^s3#jc>Gs(Ul@l}C4#0V53FMTr?s`|Pif919*Xn`=e(aq z72kbmslwhyhfg1ZUlKHB##hF(g9?pt%S)|iWZdX6mI2XHtQpm3m1q-pi!vUz4|Q!l zs1kp#3u!>KgRsiVG=2(JGo`0AN%Pi|%IbU92y?uXx6Btfab zC2z9zwl9IrqU0Saaz#>G1wDop)=f*IgkE!3X-Q;}1JgxgoxS~RQ@CiF%iKhvCX-a` zxZ{z-3HfnoruOr2ovrcA2}pzuKSF>lJ+*vxed`wj`HGs(YZn_ujSf7m^c`23jBk_2 z?0URHaYbX(Pdp!c34Cpj)v0lqv)-nahmCM~vsSP7Qe{d?BLwJEQ^yLPB4a}sW?q+^ z4%>Ke2D@KKJlHxI>~3%d= z{RV?6-|v36zmt{^8UI2k$2Lc9*BXa^)1P>6p1y;^JC&#^;3&6{t;1qw#`SaIJ__Z$B2--TY2F~( zrAEiQSu}{`^MZKULEWnGy7WRu&0>A?IHl!DUDb^zge(-Qq*O{@# zSwhilH+bX|DU&zLFKg?o+?VxdYaL3n9-H;Yt3lrtlMhLK-nzfL8R#gAt^I+3K;$GR zDWQ%HA$2IzQMUQ#Y?@xHy-P{QY96{uQlinA4u6%VasG7?u^ZYV?YHEm26tY8!kZbn z%vy8HZ%4#}@P9hKLHp*1C9fb3py>alDr!%MRhH8|Eh;hsLKY44&qq4)R{EvBCLbt; z;Ey&63ki011RP`UhX}NarP&%TD{LmbdQ+c{L>xek%SY1qeX9zLK|S_JeWF8hrrd;V z@ZpU38r1aL!!46pB8-k2Yzd0t9&a0b+Zn9)x}DaTr?cKtA$v2HU0L#Ph7mLQOP2n~7IgbKi#kG|pfIW_zg_v_QguA? zp@^^@06h^*wNF(JEq_;th_x4cd{ost9acoU`_N(c5g+lH|GfY;CgRI~Qvm$0ZJ*nO zDZ4Yc9iKjg(_i_HeSg*2r~X})zJO^Csu9Om0ScwfemEoSFCLAyJ6oStQ||T&^u8>G z?`HO4F6%BhX}{!h)1?@XR^zF?1P=`$MTo1jnzzBj8C;*@yE65!<4vhwXhyl<@9+u7 zkRE1eQflSv(rP-Vq6U0L?oibx_!S{W>3?tXKm{2i0GYISc_vfCOEanrFNCn8Gn9&m zhh6y#8msP$Ai{30xLM6#u!bWkp&z@DOB;w5%!hL&!Nk5U<-uz6z3Yiafd=;T z@Ck6v>4j586fA;6pZr-69iLUjWEdps z>x~!G{E8Ter^8l1!(RQLQ$>;&CqTgyOm%ggWH+P7eQ@o@H?t&Bk7T-d<&YB{ zJO)DJ;XpL>cyuYpS9MQlORs%MSRyP&ku+a~#5|u@a(Vavba$LlO*L71mqfasAfX3o zf(4Kk#C)hI0t!+>kt!fvNYfFxhK(TLUF%nITJ2y0EGjYYG93_XV9^drT+(bA%q1q&ufwQh>0B21@pq$e`GnvlH} zs}iHG_ld+huJF2LNT))X(mH!S#v1ZTJV0Ut%%-w6X_OAUM}xiKfivq4f=x!~kj32X z-{PtEo~SNe~_?ty;PonkOS{$*Qe5&h_Z8j=A#I9%(7CUAXV+q2|>X0De$oe>fJuW#BU@|CK)?G6~eF; z%CC>j_hwi1YKQaeTv!>YJ7zf^GGXRW#vujd(3SGG>BK3mgaWby^>=zfZ#CXkd7jtO zcX;4R%5P{6q9u+eDhtPlz&AYfv5|#J3OL54Yvpu2N=&Ciwd_!@^|;<@myxMsP&dqi ztZ3=_N}NOOolEZySv)q>l+_sYDZ2JXry{BN=GAp(JRLEV01=_N06KvU zd;-1K&}y8?ScoChmaf(=lWT4@S2tobdN^2nQy=%d*ZBjrt_+LW*Rs#*P0X1~Q5YtEksd1jrM=l-%WL=(>{Yy1il;`K zO#{e38t4Xb*SjW7(K4TD*ymy)GHsnUT&ezES$k*YdR4|>zl!$>w|u|nu<;`Q{>V^p z`cjg|Mfipf>o+e3{9~_<4jmU+zr4;fC;kbHyjLxG9ImRyY1qPP@GN*AN5D|aZ(RwT zFRi4tzoL{+_Fx4_f^vg!7Sij9R1nmg6BHQ+;Z^1`C*=!lony!S1ukulKh{G<*bEef zTv*>Jd_UxJPoQ3^z{cUDE<=T5HA8hWOPyD)KtY}!1JdrE+Lf`%F!c;X$ZM#e?yIM& z7aCK;EatG&X1ncY_0lXh@_LB<(0ti8BYNk-cD$KZueMdULwUl>_4czKVM3ilml!>q z1x(HeJo{{r3-^PFs8$P|z@$Avi0$P*;cR>3VVWj~d9^#Mpp)SknM}6MflJ?LmTtdD^rPPF4P;JhFj*HC2Uyua$-${{Yy^1N$D{8rOTMs)a550;3|wH6t7S zu9>Jx_+7^dR0r0EG#BpeEfjF45(wXB3ZQd2EAaHfDK(mdRmIO6zA2O(RAXU|*_!;1 z$l~AiR=UP?XqtS$8VfNy4%Mi2h)OCL>8zhPd3~i zEMd)IrO|Rh`4t*)bEPq{g>e>+K78<7bCd|Xngp8~>pL?|fe36HQz$XTKK)9Wr$)5z zW5K<7|Ad(r`E%PM686`ry#a|hjmNm3wHG>#k+ilOw~CoGr=fqLuS?VY=i*_N=S_XFVbG*o6Ag;IG@`QD55J3APK-i=jj+2NAZVwk`KpZ=3v%^PBfI_`r#IenV;YIA4;ReJ0Lb2@3-8NTYZ7J;@ zd9k+Dq?vx!b%zpgKuYyYLKZHoab2hva!O(rdn{3K<>_fN&L(%CeaZZf{2Ja#Gpak2 z)n*zpSq1xj(iN=r!#7hrm92IsEtYn$v}mufee|t#o1Xd-r==B?V@w0%&ag214q2hQ z+q-3J_ZFbo3U&`Xg?mlPKBK5VExi=bz|g(0t=HH$Bo$7O{w!X2e=MG_cj-(Q`H?8u zM*!Ud0Wax{cv1py(Fc1rN}PH%u5T>Q0fiJYwX+SbVm7hc4wz7UcQ5{;b1*YoFFg#l z9Z&W85;Ee{>^0-Wb9x;-N>TRk)N89b@KA_cDG8es_*~znmCml@uw2~4sX99ao&Me# z?ogGBl6)GGUdxvG9N4~NA4^?XDkJpyj0UfP`BXM)OCZs6Gl+Zb z8Kr-xoMmm!(mRFvuWaS!?w9C;2cHC5&W9yyF*GJoCiv~;?)y*M*y_kxZf;0!EG~X+ z?HL&>9*MTGq1@c>DPNuqHG1e4yX?5GOyQpL`n_lL2#?ADlW zM`I9PuV+aT;k2&Q^0Ih;v%!WafuS}TeM&mqo-yzN~5rbJx2Ssj>btkgw+?kh`v>LigHs4pRu-ts%+!gg({6%N=iLZb|p)Qa8 z@kgBAoo!SQ&%%;!h*3!=a`~WYsdHU4lE1mliXn?~S`u2h`6NoWr3I&6Zr)Ju3gC-11Qg5?=Q$2N2PB2kCLCWFa>sp?7j+t4d2H_dc5Yu}1|jSLw?pA8BW@ln3TD)^)V!~O#jZv zj9-HPwaCi{s2Utm<99QU@JfW&-8gi}U{GTs6$WywBkbg%$@^yb{_`KvL)Va+tl1RZ zKJ`K;-NTnxs~PHpw~AojE(B=$8>67OAMpPcbKyb#~KRY*Gh*#E$H@&cjOOSMa7WH-|9_uFH2wYq2z=~W5! zO72Un1bcA5?CvDHw@!&}`f1&EHyFrgkv$+ttizKv>@_eGt$+I~=KmUH9|eQ)ZPh%x z4;SBRl<_)BEXL(gks@Pj=6CIv8kN}8pSh#JxdAk8K{ zu`Myr5DdSWMXD@#^hx}I(0Q9vQ4k%iS^b>vVOe;Td!rT0@F>F^@HJMqNSa?ZA|MI` zz>DzdQsC;mfrys*OTyz^w`JcW%z|r>?qPh;cpNmycsx73Z~1Wgk_PbAcz@h%Jb)^pLgP$qT(4qipSsQPoF1;?5M}r zCh8Qsf6WdxMW7>HImp&;(2)QUyN(l_MFGV#t&0T;!a@Fx zQU6+nIa3&`sm4h$sL~&T_r84`FiqX~naD3?4xvZy;#||5XWwu5wl|wnw|^tL)YQA_ zn|&b(1>C5zq`v}UG`^GKmjz1tzM{KAR)g6gU{72TtY6yQfFN@OPT?aFe%jZgo8Ak2 zww8s|6B@~hTi{H?x9(jo1Hn#RL64OW6M;muw--{5b9-7Rp_`pewk!7ZG z;FMPVJ_y^c0fL0g00tpl^8IQsx~beP0uuA^>-PUjrvEI%6e$s+k?esSSob7Ba^gYq zBxZEE95tJ=Of04Z3nSsG^MnT)c~zPJvIAi7W+XCvc2cCv(uF(9gb>LKx`Bp1i zhj5HTw?$X7MYm;)(Qb&nU$IB#-t8KX82c%s;;TdXmV8-TrEtz0+X0$P9drFSXvYk2 zDrAP{^Oi5J3mSRETgheqN*auV7XoCLoTdLn_l^|!SeOehkR7^nj>hv3VPp`N=r6FQK&3{80`Jr z=|KtP5FE_$pDOEpoeus!nPr|1>P^Vfpzw5;06>3Kd{&y;g=t05#goanlB;JxNw3E` zJlqi5zFO=Qf!n#IHyDo1l$)Azhdv% ziA(8oXSHm1>sHr-CpY6~(rg~=wyv+m!<|tX%hFjR0rIA!`LsDK6zn34vxgvu$PXzI zb`f@DvJt$L2-TL)xqH4u?CKoMgy7tkz+J(qwZ}Y5KxozoMr8@`!h?b7B#;&o5{nSi zs^K)ypINZ=+m=6R{Md1u406E2wwf@Ib8hZF!Hx4w}%m6qa2 zUBj*tAvGAx)LbHu{jLVX!X$vbp*Qnq_)V3Eg>~bt;oP+E%5S&$KY4}s0XYK{{Ys9a RV}t5x{+?Ad(+JZB?akj0qO3B{{qhu z&;8T;eBV9e8ROQ$V7k`K&(+twmOhf=LidrMAj84I-G432FAWEGTLume0e1Tq@Z>XJ zt2-RrJsm?nKFQa7e1wu#=6Z%Ex^Qs9KH-r_iZWj?-!4SRK2j!xmwsiQEPS7k)E)CB zZ#E$%(VeGZ*l6#(@zmr$mHLrKV5nhO_6%SImlDostDrk;P-l{_Xn*o-iw%C858JLi zf>rJ;e2r#Ui2FLuf$&xwN#j|&fbEUe(E0Y+&+nDcq@|Tok=`O>zlC3{kEWAsWi2d} zf{XaHpE$RN@iueRkCswpYX4#=nGT)eCLEz*FoQetOZ@N8;5M}(q6sK)^f%k`G8Wop zsXa{yy&k+uNSmQ%qEoQ+j48jr__WcIhy{)}ed}nO8m>eoDNc^C^_^V_0k2&+GO0A2 z%{@Y-+l?+n-*2{(Y;UJ$JK*aeWzxN`L3_b5dpB0;q9Bo8C$X#RiMFjOf(I_KRIAky z!+BT#>Bfi19Nf5u2l6qB_^27a1oNB9o&CZXeT4QR49OtbO3zbyV%Y^exG+k2!Kc}K zQZkTtcXyDYg4H}ugN0kugBwySd!(K?3Z_5xM|zx(St9wNOQhRh?WweTv$2U;utb30 zXK~ zJ-Mgzf+)+a;g*yari8@;y_=e)U}_o}e9VlV$T^BzvrTUrAE}pkQf)WgL0vx8tiE}A z<061?$R{iQc{!K048=A7pf`q1z3k!|hHsK#4RuJASc>8GZ-g68cgx4zeV3yOg z^*abIgh)!fH&q!wd_w(#z$Ja?k0kJoPMA0e`w13jHx)dhNa`(?M9d|ELsf?+~FZNaFFMZKq~db$EurFL|-x#`^`Nf!G{gr zffaY~-=0iMZ9cx4gVWgzzEyjcE8!q)KsHXI`po8GzC;Lmd)>A!?&91121ZnUm&f#G zNTyCRpDK7t9?;B+j^C{KhHruY0ROiBzPogOv$@=At{d8dowT@9>T=PsxtnThpjvCd+gC+44YiEY{g)OU8QUHF2qGj3ug4*GNVnel~ifQ z1rx~?k)Jw2_PN!~_KtxL47dRnfwLH=FSv*DgkSp-b(`|w4qJ8)4hW8^{U~pKgBus= z$4b=QH@=bN2gk~$kQ9cMsavu9@~t@nUPX#OgstG#gXhF7-w;oEAKpd;y}Tz3N1^%R zYcY1dK3miSJ;+hg%`nKE=6yvJ=J$D;WQUJe5%#`4WWD8zq+jo7@p#;QB?FBUk?R}N zBHu%I_iof%+Wv}!W!?-ko&_OrPtko|%><)A#ryIq)%#Jj2Q_b7aCobe&Ryw%(|5T1 zfkYX>cHh~pu$Eq_6K+V|OZ#{$^Rbav;HwdGqyT=j?$-vG?DuJ2VI_<7AeSId1Pp#` z>1J5eEkOYL?rX#KeU6GEoqtzIPmB3JO*=XYH}-wF_M9SyydTqNw`1Z#h%IQtg1EC@ zsp76`1F8ctZGFbH^(N_|lM9);SNS)rt!f99b01!^s7G;kU^nr#;EDX6$4kB_# zC5(GpofWznBmDG*&py9-1&)mONMm#(X=6YmSK~8njxFxONPgnSp>eOzQ?VZS_drO$NIXQmvePx*nth@QU5!I6pH{;@&@65J2@;;OStYy0XscbjZ3vL9EB16T!8=xB?oY|WxeQymk&FdLjnaLRrN0K^{ zS|q2s8=ikD=q~8pEYLzi1C7!LnhJ-piLWO#a`XqNX?@^v_-oD~QS-Ns@9z&jj3X96Yu;Q?PakQ=s$hsjy zyS)r!EWgyW6b#Zgk2E=`RjZuaifJ49mXEaDvd`S(AUX)%Uf&4;cpukzdmL2iT z(2UUGtlp(KhUL{cYRk#Raqf4|KkK{O@DF(B&@y5Lia$ru3gpLG)+M3$$>ZR&xlgyJ#lQ=cdqWi;ztE8);+&2?76DzsBTqcj4j^b$nD5EcUJBk+)I2s ziT={t+g}mYGF-nIZ8O`E3sl?Ei9hz}wV!{GDuE+i8^s&^0oDh2EOz7BmgUK&DQP`> z^n{*R#gy}Qx!~23;i+b{mX<&$?!|L6oAKO=K?lLputT*iDB3%;2-0y;8)4aJ%w$VX z(@5pWYF|HitsXs2uhs89g4UTDl`1U#MEZ5s>#Q*uXEIN5Rfa-_4u-s^Yi&se`>86;>Z2n+d0%|p zWBvTEE^%{-rIa}$xa720_97J@_C73(ppL{APknFvzS_FfCDu6+D%2^`#X&w0IWMe| z9wR{$H%eZskmb~VNz1jx3%5WrIWjR z$Y~z>`m7y_oU>9=iainxThZ^LJBq3GI}&Z}nf3Q0*jnTK**dKA7HH$f*yvm)JCFL0 z*mf?+?P%1MPK{n}zial;ySqSBbD?%<&qO*(y8R(%>I7Wphzb)f$}}xJnX&cb2}c%@ zh^&kVj|3|cD_R|U_3+4^cGodl8;-qz>c&Q(MT$gfD4|wpFTAM@7oggJJ}J*OV=}f~ zT2?Sl&t;Qamx4Lc*ydT83JJ&xG_c3owhw_7>D1jSUsPCi-RisV$5JywKJK6;)15xr zp%>fETz4V0JzGjMt+JfGn%!3Zs2tM-TtQ=~vLdmXm|Y%gLN|79>189-(peEtGcY+Y zP%Uatlk?Izj7hD!&o1$T?fZhKa^xtDbCh%2{=3??S_#+J7jtc}R&dA3$ceFA>6vZA zjb=zMd#ahBnnY~{EQKiTA?GOqLe8-| zViPSzffMEly)HY4?z54~&E+NW-kg@(2L)3>@!1>cPT}RFP^lL)2H`Q6~=)ccwL^?!vss_TwT}ijvN^=YqIP&j6 zxnK#-XGa0=aKJ1J6U5_`wRz6fr-4iH**tg`<9*WumWO7o6U7HJ8xu8j6&JgZnNMDw z9wh@e-^Y=U0}XZ zfF)n;xas{sWk8CH=yuZjI-i^jdi1)PkqV>3EK$wchMVCnxGdDSLEGo&uz<5Of@wUA z>Ib}ijC*P_a^9O}o1kD{TAVuz5UB4l{;`0+Mmg^s1nIEq46EzFM;trOl^6&+B6JmB z>xqfMQ3CDTaELb^!65*x8^B+<8&BX6ui9{MZ*JiHY)juD|Emu?9GsUS+|9rGC5YNg8#esZJ8v5zuO2f;2qpc8NS!Af!{J9D_vbPYXfr| z&u6D3z=Jy$!b;X~aG0c*KQ~@WKi!0bgSRo1RkTqQ6J-aPo6>6Om}~3OI+$8q&I8Bk zzz#G`b#1f=9ZXHktl1s7h_8CE1MSPlbi{;LU2KfGh!w>o3Hi*ebP1Vg8EKyrb0ZTH z5^`GU=&?)l3;s16_!k$kfsKs?I~|?9y*;fx1FgB0J{>(98yns87j!RP&;UJXtR2m4 zv>a&6tVw=M^7lOay4D~oLkk;2b2Gxrd9}37ZEd)Si7yxW`RB(zbsY?^S2DBy>sY`6 z=`P>U(bGPs``s`bL%lx?yL|Iw*wwjyEXR2{8M~adt`(oTsj04+4fi$ToL4LTHeQ*|Jh$!*V^2~_OiR2nV}8$i@%2a$K$_7anfDVaJ+D@`CrO9+*qAJic^=1=lQ1L1Pv8OeoFL|_fZV*qS{0Bm$|2T)j7sv za;o9B3KQwMIofI%gYU?5h<&Txz9D=2@zw`8iqv3rGOo~s(~Zhd`)%VFpbzc^q2b83F?{rumZ8pz=7U+h0?(M>(k zcwC;t!8##4si)g%7dE|-XuzJ?`L;f&xrBmgoyr84-pXctvl5CvxxRN?zTw!Q zKDIwDserRyc@ikx4r@a(tAh?w?in`74XF|sW@SC?iZ!n)OjjEy8yE=U(^XR7dEh>G zzCX*Wi0|#{@QjjR0zqpjIrdQsf-ukZoE&Kd~|# zzWQQ(N`HozGtzZleliGQ%UauwMn;4aE&O1XmvijI7?`C7YpLrDT&BcKBjQ$PHx+bt z#%4)K9s79-iqI2uR=R6FQ_mIoWqbO>jlk{XAaM%Z>y&>-rXTGM(>1515eLIR+`A(k z_Qq}R{cp4ughWE~yk=x;<)W5pzAJtrv$K3SI$RKMiST-kn=xs;L~1-1o81m$djz?~ zC zB3PH5Dannr;sTSXGypVp+p`M`aAJEEHA-XG9Ug0{jR8o(uJt?JwEA_Pv$Ji{Ro5q! zL!4-7d!nlwU1zCPsj;Hdw8D@-1!;GFxo?k)WO<~tv&P>t=;pJ7;9v|o2C9GszD(G71G>S@&7}x z#Ci{Yjw+(ry@lW_)MbEah@}GYGy45!*1uuZ|2UFl8+`f^`CN65`9Hw;-IjRY16V#0 zorn8()Wa@OKU!bL`H$89-)XxwFHsL=`&ZB(lIsFJrD4^_g8$l=Q4t7RKj}QU)`s0R zh?mOfaO|Ll=u&gV+6$E={1%3o*KEeOrdF3;Dnv4Bjz;BWJbyEfffQY>Rg8S?4f6ohHMZ7E0>v1sD>mwJG z*L#Pg%w`+s2LYn8KWTXP_e1&p;+3ZVeT9eQ7!Y6I`-Ph8xiuUlf&qpW<^Oj+R*tY* z*!}nO5_U@h-0A6?CGoG!8CC*7^Lk1>{{NsoC;{yuNxyfkJ^sK%kPny!YQ436Y`;)@ z|4|gZt<~2kX3{pNJs-;j^{OFPoZqsTMe1Gb=ow)*_}#?63H!|(P57m+^E!;=8Rz*o z)FFGkXTJgZTkFFp9&@{Km%f0b-Y9&W=wy1Mz<#pKol=8hzwcWhW&$G5qSE%5JU z{|FxWYXII4HF@j;f9&!%|1in}!e3|=`0wz(kp_A`HO~?Mh4+4Q%F7i4Fdn8G`yf%O-n{eJ$Wr-;gtI#3(mYn&j;i1}6X z$HIQEeg_Cd8e_3L%i15Ct*U3j7_cJ&+@p|x-4Gn)5!&+(A)g7!iGL^Jz65WS_Qff# z*zeZ=hrEVG0Sy2D(@B%|&hgd_et|hAwjQvR-ghK8(Gidc(9EmP`8x8BMn!&i7RY;A zLQ%_ZAGPb3ynT7@O|zaL+<}S zF`!Ad?}IGPMr(tlyqKT*OwZC$e8mh=U4l!^Z+|Kp(?Nh37Cvl5HZWkHa4%?%ItHSjwu!zgJlFvI^I2u z-QnfO3~c3l6h7_!N;A2jZnFw>vVUeOAIjm{Wg##Pt~Q$Xqjo5ya_`9pdo6Z<1eMwN z?&DEu^quUFoVYlb(Rgs7|KOS=!#jZG%{%$X^am-@th>ov)zIBBnObz%aEYOeNvC?b$)q%>oW)%HgrzA)j>>pk zqpz9SbX4}P!uOYcoEB1#s}@5Bi@$kD(HZp?|1i|6DoFwnuv`+2$_=#4m;W#G{&<3m z4Pne~>?OSmQYxcXQ$DJ48JWr6-B_N*MCaWDJP!VrK*KlQfk(zIt6*4y1X>6nUlJ+Ujp7zo}wt~5c#zY-B9W#R_TJrGIT=qxRPz$FMu~Fv^_Co|BJf( z#VWBHz{eIzJ;Urw45jnK>TDN`YlTM}F+ih=pcz?U8Y}5Yw^&RFAPIm0UpbxFuS!$5)`>q3xT?G1WEnnEZkvhuwOd7o~K8Hg6ejxCeEw zF%pSV|B@crr43``0o;yx`>v9xzZKsYleRepNc4zJqz4dWe6qB_Pe!_5B={`!* zSgA2UedjfNLt+myI}h>)AQsga-T?TOA!rbKK_;?_&PTuSxw%5uiQ^|LSj_vY101*R zKgK5!3=9MXanRo);HoKTr@FGE1`z;BI=0IU8UJBce%g~4>>L6PgDs))oYWfRh)Eh`0kFTi9{-|jJ-^%yq(%Z-KLP+8nJz9D}Y%bykT!L zB?ZD_*Gb6%T!v?!cpQ|0`0T_gfqrGCV*`pqf>*rIjSl!LF*9mC&#qYmNh%4*P^@C9 zw->QRax^7<;zOAUkBj+<7Vi$aBFw~6)J<0!_O(+mLHNQoKP$C>e}fxbRL`%3pCnA= z%WOI}C_BImPfAr_G?2|H8{!3Q4iHQcP+zl{^B#)_mFEMTXe4Su|0j4YIxEOOKxqR& z5j$EpxDGKP?+GosChYl@d%JU3T-K?Orjf;8|D;#a(VeexTUMfp%8!!($GA-$Z?V5p zhPju2V=>f&?09b@k>)HJ&}Pm9#Sl1%w;Yv1(@c|}_LmvbLYI*ulbGOe=fYi_=LIhm zL(a;Vyrn?N#^w0V1BH;Qv#tj59!rm0#@`Z_m+|+Z?0MSIAEQ5BirZhEP4zerb`PT% zx3Vr-xRiEpZHR@yHrXCCX>o!D2b`RnJL+nF`(dV9l28|ZNW`oJD-pCrtHINx(HA{e zF~N%p^Q%U%Sfos)zuO_qpf6p_pL06ZVWQ&h?5-tNzpJ+MmDKVg+-!@a>!_2Bcz~L2 z02i12d;4Ya{m$xmAxVO>o{O&c%uz=6ja0a%Z$$5lb%l4A#>z*zft?uC@MgI925F5j z5;Saw=BiaQ&v(T}((LRE<*Af6hhZglMDk2o>tkkD5Z;wj#pY^vVmVI^e0t>_pHOJ( z_@3RgVf{S|GlC(2E75XxFd6Pe9zh+WF`#+FmlB!k=7VV7p?tMjcQlEwk-jGVzK+$e zFeo$&EzXTctW;7Z1(0P`ZAJ!q$}j5q2AQ#luhv2YWcHoA!ZnlrdSvp~5dB0a>h-#f z{HTl3-0brJRx^HoY)935PdF=#Vlm)vKB(nw0agl05u6th5D*Y`)-{6VDiuG&cV3)y zIFD|bEn-o{id1u)_-GzSb!k59QGt7WOa3X>_}^x7c$bh6d*t%u$dww{s~#&W4yj}c~8nVg|HTLx4qe@-On1Az322 zg2u`GqS7EknfVw#PvxF5y?tj6X`c|@WxxSRrvl<#n~i{o-v?y8BsY7*;OHb=x!G-( zv4>!5dGy*FFtj>$qoUgDlC`CnR?|7IG9k@v}A{d=FU7o5aPX5?S|tY2LM2R25@Un zs(DcJA3z2n>WL;!4)fs&z2!wikD~9GT6*T7!sr8>kQAVgR8hHn%Qc2Stq`zPox7KU-pwvd3NgPU*oHMf!lwtB-TNPBT6F0D3Xp0K-$jZqrs5BUsU&`o^sVF(af5jV$! zvHB~p~due)Cz!zr%q@!=mYo zJV!Wg@eXNevLq1_%#qKFal$GIsz;?Lu=w;jirLm^e=5H(=a@%FxDK1$hF3I!vG__0 zU6TGeu>OPed~39y=P`H(Pa$jx7&)CfUVIwSo|_v%&wi2-R}r#1Z9E0z(jHtDDAfE^ zlr7nmilzP)0i(eaCCF@|8gJN-9Wa;oF^7T3;%h7iOM2}KCcUm0HCq>Gk(rYBOy=T1 z`&1t#I@RUzCFvkXfLj6Gs9=IRH}IV6*9KDLKNXeA)r$Xl-hVXSh$qQpJfPRxEm#~X z*QZvOPN1RE-pd}?+I7#^*4+&LMDNfV?1Gw0UO5Kk6I0X#0!Vtd$r`oxQ^jgMQ#oT9 zZ|(ZG>hur8njM$(}M=|hk|`RduqFqMtmF_|o>BZdmT;^++r_tuUm~`|e_MrF?=UK2` zLY@ILzJ~lK_CTYa2y1b;5PXk-`ME}f4v8GKS{b0H^L@l{%4P4$?=JUoI329x)fT0h zPr-UPoS^^@J%{YA3|@K?prfL>LYJd$#gJOFk)pKG*O!c(F!Dl^cVn?X3tS4Gyb>iy z11VrkD!!}zLAO7s=B;zp4T*^7uorYZiZM}&MWT|2swcWF#&RFhgzc}52_Y+{Y61@R zSy7r>383W*#sD`mn%B|Mky@$5LE){o5_ei>!gD-?wzeukuLo=Uxief> znuT>0aIL(~Fb)1e-7A7zJ;61D0I@4SRDEKHv{ZMZ%9m3$x{*I3Ke$yz|LxQzw$INz(iSg{wzX)^~ZZFAUHPlwHGI^R(7B^#mbf5??+^}S;hrI zF*6$m4~i#Z3%u6`;2*RvFo*2Ev{Bxt2D~3XUus#Y$3?C1m=q=-RrnoY1Tv?u6~#04l9YVqk7Bx{4_$T1Da=4n)l zbd9;Qn*!DPywC$DiX269$7^25M?k4fviC|KW*a0TQjL}xM|w~f4Vio?U-;;^>pEmd zN^RGg{A7^OzJdCrvZBc@U!F}^-#l~LW4 z@Kg(arzsd;bsJn$z7&}ed*@#+m}dFg=S^mJjsJo7m3x2nB%t!KGg2snTs@$#XLf@`_Q`#=b)h z-O9{|oYbL4&T}wyV#9%KIX+tF)|K_)LXr;~Kys>>)VX7*ylD_PniGspWh!QW!Pah| zW3b$8q84Ty)GY$ykdfgfMiJHNYnSDwGbIsY3LOOEhfAHHc>&mw{i*K9zbc69-Cr6K z;g9Ym{Ht6ZtqB3I`#e)E>LYfiFCuoe+~$zZn>Kd#T%705cU`*(dGVjNmg$42Rvqtb ztWb2Po30pCuS}-Fy(v8y*+=&#A0OKtWzNxXOk8oiG4NwK2m&!+LwK>{-T$D$8PyAWC5?7f~#mqeDfaD}OsP91trwOS8nkQk&=?R@ttGw4qC>P2XhG^KC+_VKZyM`A(LL;rHlTd-->_1)xPeW7*#Qft=SqIJp+k^AW7F>i=*@t_oz< zru71U^>2PZ|C`I(ba(Uo{GXoY^@9H~`aTwLO;!<2a!ubqbcH+uE-XKT`+ljhxoPwe z8>l0?G!9aOITH7sO@D;**Hry$)_+uaK}bY38#C$uVNQODkJ2cgTeKEbOcPub_e`Lr z8!*Nr{2}KcicZ~kU75rragAed|u{t@7zg`f&e&k%{&gup|98e7h2tCu9XCN6QX}TUg0rD}!6j?Opyr zUXpR0KkzROdEg5ei<_DT=2z;<7t6HuDyIn5h{mg{SP7{?)sJI;aSMOoWRtn|uR0!x5#S*Y zu-5$PmefoB+qqCeP)1_x6YF_eA0S-Kp-m8^#I})zV%v@fSDfcz3AH>}%idqBEHc_V zKJ4eYAe7Hn#>!JJ^+#>E)vSMhZrYRL9Lbe4cimlDww80c`Z{YSa(&Zff6Hx*JK#!( zR@{H`hdU9WnEI+cjIA{lSFA2hp1kIK^N}JZy$+=~x~$Io1eLURFbzTtXVD~T(PuASq!|r)`ijwm2H)NCppYh<^}BF%uGwvi%ibw5 z?PD*~GaZc%5HB>abUa7&dVIbgg-d-+;NGL5lv&tY2#M}>jiQXSKf2Ov1Zr%Ag zYV&Kq!+F!=LV%YJ$LlUwxleq`TcR@pM(5i zdRH#amaPGu63%{ES5Q z7u;LZ#wl_+@=V*Qgr83)_l_$TYeKir3}gI-5;8dm>5Uq{32S-Eo}YGGAFaU7D-IwT zJCsWmXoC2)&}F7|jXEt5wNllKDf`n_qSF1WT>YU%xxu_<>z@`JNcE*38Bp54x=QT* zFsQi15TL^4(WSkA5YJAKOjUL77hs#-wBBAUMX}tM5YstOlu2;6 z$WZi&GVCCQho602&AjrtIOlm(&6=Td@E5v{z|s{;?O}R^nGzYnMjTqU+Cdrf%;7K( zF?D|*x=#Pfoyhr?y~Rnkzl_i4H;~=2OfGj;_k% z&KG_3C#6kC21F!6_k2TJ?gfc-@}v!}WIX=@b>rdg6(;a2-m@DaCo$v?yxYY^(aB+K z6JKWdCY*=+P?YoFbIuVtQEgjQ^X5mPCY4DCv(zk=WlF=64@mg&*nJxDiPdR#yB)`z z2AYYimi;}JO8cCvjv5>7`Fo#JqYL%cJhNpkHq=mBxRes_&YdX*>+@a|eO7^9>DDAA zpt!*;ed;}P8E%i3Z>6;P!hoO z<%VdN_UHj#Q5vX44Y~v_gBhQAOQnDoH>E}-15MwIzFN3c*>N|4vZa6$@?R7=t`lVU z<3X5$dOIDbj^q^Uc?Hp)5&y9ym)KdzXVkk`;Vt`X97U46YwsmA!oUs}#TwjE17rAZ zZGst@RH9qFn2N(dT!W7-OmGY(*HD2nuPJ4LTJ{|cw7wL{(VI4(I@rzZI;df4W|}*m zHz^bwLwy|mB3iPUR%XY_Pif5+rVkf1;!q789hZ%_vVzpHXjJ4k+>mdb4%x5!`%+6v zsYd9ny>3sX4%_-h($(U2Hinb}t}bhm(?0VQ+nNbeIw~6&DRnz<@G5%yCCtb+A*$e6 zLe!=-ri~&t!Fj!$vj0oup|URDbcJju+W)+l@iJD;ix%<5Z^8;ef}FY$M?vVxt0 znc?3gs!!xBi+-)?a~aPm)#G`uJ;W$-g(QnJk@H#9_Sgy`1$3r1t;Q*w&I=LrP`DCTU=dzdBz#6 zv;DI(bR$Sx6mXjg8!%#aJA-olQsES$(%v#GoIU+h_b)(}+~Walo8US&d#anwS}%PBjRuDO)QJv0ZWM=`(s+0P?$*Vm-Rn}2 z-_-bSfJUS7PQ?7D;GE(RzFT`6czyaEK|FeEZ?uAVcy>+nrfBk=`yA!G(opGP;FSqr z@_?ILEE}~i5{*a$GPgd5MQ>F*C-~zj!KIgN6KGVEP%@i!zM>Pl()E+N6SH6R)qPtM zqT&#_n41q=PrVL16}Zb3-jW1RS~c*1c?cjR0i0XMeiD9#WDopcBgORklOG(mvm!w= zsS2}c)Ni+&VqKq1ok4>XJS-+%I}neAaqeQ$2A$Y(At=Dk{aLf!=FsVZVd{A<%b3A(i@7gFzYU@zm{LfKE4ZCkRAJYJkuplxe5MTK^? z@?m1YsUc6LD^F*J=Ab7#LdfExj*fQB?;3LoZCA<; zkD&PeJY0t+s^B@Lt>e2AevIr5us1OYBw`&~1Q)yK*g(cEdzmbrm6Q8;Y2*9L0OyO- zU3D#A#xp@>@z&4xn$ql{X7tMypSJ8-&h5Wy6mSw)Cg(pFK$ebk>Mh(_3aX>Qft0>@ z`{LLgH1kzPs41iFkjGJT7Iu>8V+>`_FtYJ~45jr(HxZr#x$Vqn8?rg=FMg2t@<9+e zD<_q(2?vUeEhXGR6i&a>QW!E*dYM{5fuHlduWPzUCMqo2;tIn;<7^SuM9Qi zFceLrWQYd>1<|q|RC?lFf_K{@kS-kT_Zb{*;+&2vE#H*cEcLF{tBpthhi`W2MT^9Ke-*~zQpkxzq@^(zR@jiD zpj7U{=y6hv(l=%CWyu-OY6?MOVXQY3i^LOU+}MqeCAA0j{D>ewh{N^FVmv;eIXM0C z>lC)r#eJS;bJq)pnzfjn+501l7+LD3Y0l-;!dqPgtwMI>`3y;JO88d7GBRJg1Z9Sc zS05*nPbQ|{WLrx`btKnvWDa=!gbmbX_zHQiub+y=bvwStE~tl5x?uLhjzl2OspTu~ z7s4^H+5XkIELoFLRaQ=E2i-1d1vi-quJ`C3vr`U76`Z0WlXR`=JQNRPQkR6+zq}&N zN`>RM3?`n6It$4^Ik&InCYUBJT`9jf(2@3!&a}QUNZGgWNZzL8yQ&@N6*w^K|pQk8+H1-GyBIlrP zig0R{q!HypJeL!cP~hX_iZ~lBicIRkT})R^joH8dFMzyEWSM&$dc63m!hPv33xiX- zTK@L@iRz^i4brR{(|g-~;#9fv&UoF>qB_IbHo2ATO9$D~ESOn6LTO08ckxMveu`@9 zN$~nngh^vC*L&hpJY$Y4{8&FA#(<-=kNaQKpMP{dc;YdW`BJFGP(OhnBi>SdcfIdI z7}jzOd_DqsM%tZYUnkrit*?O_%WUeukl;$4;B4d~>U`AXV$jusUqjaWK`@7PCr)H0 z*P6~$dTl5wom5g?9pBusT}6AYE!^^WDWe6&?XaQt$>HH_P|JOm@!&HnEn}BOJ_6s% z=RoDj$EDjf8{iE;A2c#SB_*ZP{qnj1d@d)(_-4a9e-oUS3G#)G6R#zF6KQgQeWl|i zsxW~8R^-}PS(8RHPcWUK(L6&@2&H(}XAHyad1=bpc00Fvn31h6c^2%9>il`FdE)We zK`-e-{3HAG{X$`a?Te%7g=`Os^TY1qqy1g2lOusXzW#Eqlk6cn<5{xv{n3i^DnB4V z0<+?~t~Q-I8S;CfvmUnKOjbf=R!i$tA}4NmGcpO5a|s5K;|8ZAM^|eqG2dUSQVjQC zy(|hSR##CSjxSGH7DJE+Tx2CV=ccw&xs@!af|{$|sRpxAQ!bm|j5*Iyp72M1M%%Vl z5R%-0@#32*7K-N|(nUuk3$sS2{UXB^YUv)R$(m9tFHJ#QlGbAXo^}>Io zddMFclxg|W#Im%2-3|p#moC<+m&F^uWW)m{D7_Y2S5}eH6au)Ejcf}V33LqUVx4># z6sl4{u6)1%Lq@}iXwqVjBl+U(C$VYl%7)W>(E^ugKQ8b=O>V zg2Rjt!lQKIItRrP1F4$hyme^F)_>H<|+aNWmg=ALa zYvgRv^CquJd%1fY>6RL&xp`e85ttH>i=C09W|%PstFZz;ha`{y^*jDQ>USiod5iDK zh{muIt)HCHf!5>Nrd!zUm!9Y++AHBF1Kxt-T5YMi3IRlqy1P~gTJhc2c=pr}>VL9vBF4GdGSzsz%D2Md<|-l^D81FmsyA%eQQ_55 zzBJns1hogsVXRb`IakIjtwo-RZteC-v|uQxhTKP~I#D5LvS$FNPzJb!L||#S(3@2) zrO!+ZiU<>O4dp061Bv)N`486jPM#^=vDHxc+uJEcPIW`8<2*W2K49^{1Dxt{jNQz8 zF6i)&ojo~??i-{^w&r&W#oJ2PKKV=%-h!@HJvVtHw060Ffm11dUeRuqzM}vI3Bo)i7i1?77h+k!sTIqvc~}x`9}+TdoV6QqO>J+t z(RrjMzTb-{P9F>%`H^xAdwC`YW4!S$0N_kP+{weh$%6mLLer zKtbtqass|ps8k<@Rg3G^;&Ip!6oXCz1xu3G`Fg$b41MxcR`Q4M9h8}bzaw_`N0w$} zZ%KfJa0%^XQk+VyMX4=}TI&QG*&1P!rZ(@aBd1cURAu(`GGx@D8y*dG!3Z)E<=B*) zEL(z1caI+IL55Uq?M^rMdFCU}X8F3BHS}f7`{jDAPpX|SJ}A5<@T}Vzt-6SH>9Q_0 zo1moSv^P1O4nouJ94EExI+JMnIuc%ITzC3`?}TH2z?fN<$-MQ(J&C>(`EWWNV z<}0p2gTA%fXh;u4J-+q|c^Qe7UA2-U>o?Av1fB4GMV!)+b-_*RsMN?~HCMTP2#H*h zk~@X#Ne;nPrRP-2I#>Hlk;zkJzFLB;pj=lpLW|TpxJ-*t4i{eML_l4*dBSDd;e0bT zu_;$U!>1g!^1jEsK8YkC_4RJK{l=u_M(qK!PZ_SS;OR=;x$)OT9-cbci}N#*0BL#y z$vypzyBdeDuI%tpmmBwoAn6$RSmw3!wBF_b9FgKsr@ucYGf1sB)Q`h5WRoNnGYs~k z*?ud~0rAJHe)q(~LQw2uK=wnS(xm3~Zo!TgSrqW992tkgS-#qi<2G*#2k?jp)Y( zLkYZj5*(J(s5whXF4KMejCtb6&fu=Z#O$j6!WP^L)C?B$Nom{3wDx^`Ijg>O8l@yn zLAm%U`ChK2$vhd=gY;#dzQNpyhba1e_NgDzQ7ICVpYB!5VH#y;0ai=eFj1_%C%pr5 zqNvyJ{jvut5v2HG?;vE0t~qwfLVlNWrAz^B-R=94Fo{B{)f!IMgt6P!E}`LaG{TYS zsx3Xvm0sj!c*JUP0R=x~k3ZMiD<%LloOo>_9F5Rb>;_nlV3_+P)E?uy=dfKBlrri< zLM1pj?#jl62S{5goSx?=&hxdNCxy8>`&8EgB_x)SliSj-ETrAS|^w z<3pB!Z9fIJpU(TW)_0$e5d3f~+m^ZaRjP(o*;o(*Q!QWL1{W2=lYs_IXHOxcywIS- zLSx3DrJaVAc?P22iXOtBVv|;Lg<|W2%R!3i>v*DX-}5*P{W|-v3iB+ zKIHhq`J`!ULd7jnLd3Eq9_7jx?GF`f+yveFPVxWfx^vB-^pcdbMJ}bk**iwh%BYX z#X6J(5ph|?b-x`6G_pNlIRfOf3sw_SLpWiHVkDOrO{YNlt*w0NW}^aZuifQD1PrPBTS6&&+4^*i;nRBom6*-EYI+_e>Ji^kjeG8qjx$Ft#etX?aVCQZBA75_) z73KGR4-3+ubazREfRv<2gLF!VpmcY)G)jXYp_DWN(%m85-92=}|Dt~U*6)4Snl)l2$&FjC=VgkyjTad`&V0GQT?Ol9znb7@NqL1?Z4$qDBWO>2yiNZpT%iXtM2 z2}qf%finB>sc5y>ty^5ET>h?Y_>=%3H~3^&oo-93PAaCv%ZXg=@6ONXYkw$==j?u7 zIjk@nh}6AV8+6ACjPzLJbxAPTj3*?qz+v-8TN6n4xHlTp!Nws#%-U! zyP59*BCTa#WaeN(t8Z8Fnp`v1FKZ9G83sY3Rka$YnW2;|8&u1?8BgA37bKg=MlCCN zb1e3KIa~1N-SuI7Ll~%ZTO+-$m(86APD^Z8yU$09Jh4#)udTyK1qHghyO+v`WkW+* z>bd8x=Fawxo^NVWdfgtS9lcY#1^-=yZLV1VB?>+pRp4V{0+Uo1WA`P@w4C9+M3dC_ zURRY~J%I!knDgiDTuU37k^K$fAEr(|rd?AhrVVrT5B=zWzWuFguX@|y9J1(8!StPh z*lmFSto3+vc=`LdPN?Ng9G(q|ZYq zFSi`)h>`gFD=z`}O8vX3ioPAAZav@J&7PB5og72Fw&SGEbe;<2w^(Zy2w{UF$8Y1`3DhZ%)yT*gHal8XU`>G1YRJ= zt>57B70cD`YZncOH#={X;2v5yysBhf11PNaBm&vpP3C;Z$mUU=FBZR>{rjFWbgnta zz6-YijG>E*RsLQREhcXVnS^{oBk_JvHa4qob+HEU<(~C!zd97lPM5Tva?LA8|q)aQv&&4sHS~yOQoP$Cj z&r>b_z59=*mr?{YBw`7;FBgt_Uk|OUFV*N?3D?uy2;Z^mR`GzoFV&Z?lJDc@f&^~P zM6p2q?dm{l^F`4rmR}@3N!r!873n4;e8C;qO8d)wR)yDNoeNEVhI=zV9@URgM)G0c zYAf>)ZEg%^=7*ol4|i2N?UR^|ed}`SOW|R^Td1khxwO7MUuI|^X4Px-=97@G6)f<& zyPA)TbYG#oSaYh|*0$*&cG}T+leVvWRz2QkH^3i1&U>{}wwxYB((PF?46%a#f_k@K zd(?l;7mC9aa$g>yVl!%2m$!3fp`HOiiWD?ICec$3pn5|5qRqGH>By|7Eh<-`!jrbJ z3BTyImvoRM!$YUKA9f|>t?Nf&eUNAhNs>a6)w1j3aJe&`N~;JbEd-a+Ghi*=1__DI$X7{|;MBwAi;XAumHAfb7 zD~EOa9xQVQ5nGhAjOxXo$8xn#`6dH!brRqWPiN*v=H~6rh@<1sDaAV;rCmuWnWdio zktg*DEmUaMlgRO3$P+nTPn@rWb8-0Lm>DQxIWPX8DMP1e82SA53+jTL=PZv3c}Nl{ zBJ@L=JF%>)47B|Ey{Z_pQJZjCv=akDTP)cOn7pF<3~F9+H`sS<$GMCQacU;dvz%)29+16Kd0AMe_+BT()Y3zV^n7os85 z8l-VW>X;8N<3%+FJmknfUh=ab+kDg2MW3y4iU1lHqdRTgo7ZjQuhJv2kS5Fc+Z={? zxQut_;-#NI!DrU0rQkG<0IXKuL-e-wsc~1+fsCE=V<$8DQxuZUcY3D|yO(=jmz(iN zgW~@_fxr|qftBORY4KI8?$&kF?%Akn!PxFAQ=^b(76&CWx0sU%FEAQPWS#lywnMU@ zuI$$sC8^h+4YT_vi~Z(9i*7pvTfYGYxt520dH;4T>HPF=MtkUH?8i%%D6#i|&rob6 z3-5)LrQI-IeUUg5<66UQI2Uz4Elcb3>?&a*MW=w}oHCv#a=pD+H=M0@xNuQ|P--vN z%->jskGrh%&f^D=i6KE}eHJGOrwBk%L?ef#9J)}#BZI{ls_+Qkix#txpfq6AD5Yvx z%TdTYv=%j0BRHXm1!U2ecpU<-%i**o0O$2pV&6)POQ*+TVM(r5El2c-M*%^uy*q(6 zMF8#4Tih$auZI~$l{R#d<9~_f8!BhetW>>K+y~LaNFYh1nua6x*{dGSxkJ#s9oBqn zU-+!FO+lPEHqt&?Oi(gYF3Xd8*d)z^v;Ov^2scl>!Sm)YnnCrOdbHcuapezo`*WiW z>301*>&nFAF=~J!@AAU}gQP6F`$qoNyo7-(LFznSnH5cgL>^KRj!-#%_B^OY=ThEd z!CMFY-8+#yxg-j)`1Z>GGuH-!MZd`YdW~zVdJT7k3GI7LBQ-f>)9d#2jHB6U zK@I>rnIFnsrd?P2yl^a3qK&f5j0kuyc1` z(g%?dOy$UVY$;ukze@^vwknSF0#r5|5|or#W?3IjZ+aT8r3FSM>(e(Pu^ve12aVL5 z71cQw4YDadrGS0W-P1!E?tf5V<-s6u_G1A1lm7lF7DQq(0PC5=xa_bhmK2h(mdk3a zf`URc%VzaEO`9Kgw^xSE{)iV}pIwS1Ra2!QSLY}u0$q@5 zWSWh4M_CZOUk+g?%pr(g&ey#$C9$5|hM5d}#xJw@dQ+1__o7>kViMQuivEJCVPs6X z{-V!njWRGRS5Q%Mm&IRDQDSA~qg;O_D#0__A!YKuk}%%0p?{zJv0#k^WV!|YJ!wzZ zM!0#B+m~a^2_hptRgY+l%l<(zz&g7j#hmQb=w{-5pE>dmTD~cd*A1KYI`6w4x``xU zl(6Bp^fd#O`sYOC+I_#Y5Qe$q(^_2Qwf+d+{i-jCXF2b<1j7ZQ1VjqW=)wwrmb)o7 zjC~m-0*&t;&N{R`@hp+|#Doh17=mQgA2=|hKB2Nd{%p0&<7`$9J4*uotJxO?1^~6j zrO|xi(ysaXK^T#x;o4RpN%+-pL>MtIr@+mooEq_KJnc-=mya0eB%+}SaxDRKrc$@y zi@bE7`Hd>vSbpIqW8OjIhO}w3guSUu+Zm*o)ltrr5(Xg@5MbJeqGJPKj zVwW%&yx5_26pf;wpnvsz$c+YV2mwZd9P0K;X5!7ht<#U18R{Q1N1pS=Wo7JT>jPjbjC+jRDbn7>=PtIYkKzy5?!rdTHS{Sh*7^ELg_SU__5Sqt+^PL>qF_5 zWJ%)R74a94DC@}=%cMTf{3-ZMHtN@#H}b;3%{UfaQX;$FnH^|p)NHZr&ji=ev%o{L z7HIXez4S{#6Hg8r(aq&aSptr*TI2AG-KpD8fA= zFnT8FdYlu@Ws;|y`{5OyUVTI@Gn&vGJ_}MTy^7)@FWOXd9C=EhhOz;U+UI&6or8Lh zD|3fMdwhQ^rR=GQhHz^FAi?LjENw$rR3LNp8U3cAdxcuP>uNjWcHuDrrSt zn-T^Y)W1LEFn+eUkK_%@Th`h6%eZXY;R|kJZ#updM2xf+_~uK~(OUoSYbhZ_6IEmo zBEvLxa?F*cRG7v3`TfWFv5q#%KTB98s@Dk-&vMCsoVUA9T1W2YXlavA%Gu5D!*0^Q zQvVTp?B!(Xn>TZ{F7!kq@yyzcVz`)?l$m1Tii;E}iM+xtF!1o2$NB<;EuNoI$e+(} z1XJ3#;MuV`r{#d&z82G)t23@bP2?xXXm~l^XoL%^h2;JrlfyquftYMU2#;5{oafQ z+LFP^)+l{gYldiOSYlryySSpFVvG}f=Fjl3Np=xoVc%i?l;;joWQ@V#;n@C$JT8Y> zgE-`TeEN?Fo;|I89}&TM*g0cA{AwuuC43tk5gZC08Nb_U%)xSpbR-GC?Qxd=pvGew zW3g&@B)gAb`3SSuSP27E6TQ73U<*^RvMOlvSTA@Jl&Nr}{>u=4nS*n%(D?S1a}JH; zOXwdoXjtmPVko<@R}m8r|79=aQC0>alWB7C+S7MRWLGop@%CF&Wj}>hMSd^KVOrxu z!?ZXqT~Fa9+0-bj^8J*-<+OM*rWC9`b{Gl@3Zw2Ay420yAUB`4dW+2g(5$evv>VL< zZK1e59i(22kidmPSU5O>S1j7qMk@vE_(|C;{#ob1UmI->eff}@YN_3}ybdCVSQ>(z zL4`qENY~lC7fa>5XAS6}SJj3_rE>m&$7PukhR51$?2KtA*uO%|>T3;SfG}%&Sdl)S zuU?Xo8tVs-O5pO}j^jTzzBkNus8GHC#rbyRkJ6?6Z^S9uh5Jl5miSA$BRrbFO{xe} z$b57WRe+g1J-yWFjPqh~8(*h^#%iGf#`R?Di$6+qbTpj5Rm06#ryIk=1F?@!UMast z4}NY>E(-M0u)gs_U!4P5R#7d~qT6q{tH+Svz$TORPJm7q z;hj!z=>_ht?R>I>k#U(33Mp4Qyzbt(Kp(B( zp^1r9ZjD-WM2mKY4Q7h>01pN#WBGQwh@?;X(#2Tu{(J~ul z+jrh&f^y%{^uqJtnx|~F$<4J#lSOdz>f<8Hf8C$|_^}brr`2D*X+xlwIX}oRiWflA+xv-E&2eWU-PJ$pVJcbRBf=-ZOJn#?LB0|TxKC`% zr6vd$Jz&A-nzjFk6y*Dq3X%X78Xg`CdN(&mYi&i;(PuL+NkU4>g3F@wtiOOALbdRU)%WCv&&E=x*lMCU9R?20HA6GmLx?YZI*R-yA5Q;r z>w8Swi^Df{X2~5qc58U6Jn!kkLSC;`Zx?<9b+MONbmdErgf^^(>%&kRUAM$n$(-*s zXcZegPv=$09}hkv5qkT_QT}$9|MML1v2nBk&dtR$`y+3G8ilIJ?s{uiI#N66eOk z&(IW)`O77uDMXEaetyB@WSTel`NhL!886&0XjBu>V{CC;rPl<_>h|d=a{`-H{l!-s z$L)ejdKOZ#rdSyezrrb;)oiE`*y!0_{u3Sk=W_2CW5EvQP)5e*Q{#aR4K|tc7uwe& zHGA0}dy>|Tzt>8K+S+hm^NVXi^%e;-hmEeWaiH3MN52K8(D{32?P@$+@SR;=aHEtF z3wmmS-0n$X$b()FXEs5HEoGnh_X?R}lh`U*U%z%|Mheh_6j7}*uydiXX76z2Y3a!* znDCUj16(2d;acy#S9}Qn_!%xw?A^Wh1jyQ?KeTuxNo|U}eO#kq{7~;&nYI ztCOAwks*xhLu3xXjmYD>`^UikJ>B@%Yg#Rs(p!w3Ri2wl{aD63v-E1|N|KwbccB$J z^^`R0ybsg<1@+!7|y#EknA<96M95Iw3ip-%C{Bq&@ z(zvb{wtvUZT>wo+6Ixci5IpHFQ7h6pJe{`6ug7J2O$BMTUjO7oPmWa8pC+hVf4+pg zst-*{FLufXl#@SyCi}5l!5GOp`TRg0svUt<1~6+Xk!TyBsa*9wii30!6M+(K z>Y*=!HiBOW_|mZ;icAv0Uha(Keq_|FkkRfSYDx##Y$@n8g%X_kDE9xq8K+?mMPy2x zZzDq#Cl9ep2<_ zwAwBe?{PieAO~b{6h_)-Lj?M$UO4hjX_h)F>3HT?YU#KHmdU<(+a4xgc^p7DrXD_K z^SnMccaV-_q$@KSz(J7yRv`Q7d8yer^Tl>igK}woVVc+Rp!j6%>y2?fm!0vVv_o!I z*WqS_6_s=!|pY|#QT`$+T ztuIhvrNI(}x~Gz5S@rI-zj(}nhiMAbuh8l}fKj#hH95S*N~^NDll>#RPjE28i*qFP zM7BMzw9Lq&`b#Mn7FxB!l(z7a8q%czz+EE<3+WA#z$pk>TNjIJr}+E(?^Hj>P2z{JDJFO8& zFas;$LkZ1j8t!gu!iac=spWvzTiN~Pt)NMeXJ zbb9a!Z%{R)cCgY)m!mMnA1ep7h!QfSKZ~2n+baC*U$*?8CB^UG&(p|@gra*)CJ&z< zE>{|zga}gC+T*)eb_Zd+E{)-`>Zt$i^Cif{p-JnHe9b4z#azB5@HvP@oMHS;fTnP3KSh` zv}$M)T>BTn1+M)u`g}o;i4_-_$xd@LD+`rDCKXWEnajV1iHn=v`WQ^>>d&oC;jt4N z8X6kOsLQmWMCn>=33>y}Fhps4tN>OA$G+vn-yg<@46Yf~?B!c*XG`x{hjH!hZJ0wm z7U7}={QD~Fy=02V9s)zD$CV?G=XF(9cD8gl0f+KiENw=gTMPVD%yuLj5m^JIa(KC5 z3oZ2X#$*^6-YPYaI+eYrFc0rh#b0v#?7!U=x>g!I`uf)L+lw@~Xs(f;qCV5N|#K!Juf zD?%PSAuBDWDgU|soKoxO_L{v20tS^6%8|_%CK%WVKIRQW@5Xe%s?cw~S)(khvDK|( zYDdcSwK~@uE=!R;+#Rz zQjW#Ulr$DM$3OLs5YyahuJ~;y=0BhBBP`ySZ-9hA$NZKTUB6)3fyo}iP5#FSsD4&^ zsA+KT^HP7E$nsEAkXcL~h|YWhg<+j0R2SF>^ZtKaw#$nJnM0RJFNkx6rgp8fhN9vy zN3>{|RKs~qSW!`pTV*E;Lmk195F^zacTX)`tq5Pmu;0E5st=sSQgsf|hHNpqnKR)Mk z6gurz!G-yJsm^24nf&T(H6$wqKVlKbm8vzKby5oRcq}V)(?NekiA>!q~wb z$e=pq0Lnj$;q8S6NlGvnC}avDt5w8edfhAd);8|<3JQomn4{BUeVq{Lcja_3GQp#c z0ip5%vM~uzA$|cl=hjA7{Uq)`Z}o3w4);qkDsK5aRp$J>RKc_-pn9?Wn%MW8S{klr zwokOh5NixThjj^Os#9=ym zWWB!=@n>i9z5Hx>^8^Enh~ix?9r*T@BY7WJs|VTZ%5X6t#LytZ#K9utocDY4DE?#K ze^<{(Sdwf}xim@bM=5$>JdzX5RIaw+MY^dAk1eC>k&KPj&@em9;tQ=3%8sS5tA+4ilg~rEM;zD)2nNHvr@vw9bKy%|@do>=&_JSH)80D!GSC3%~6H zi3`d(bK-}^$3q!~DwFQX`#Q5x1=`KYO35I^^-+>kP(Od6j^013`gdE_%8*e~D)nb) z<7(N50zA{=UQlUevBh z2H}WfWPICe5#Q@F+t3sNJ6Vbz|1NgSy||^ey3!N_61bz^y9+#%Jh9%{cyVEKDl@u$ zE?HDHQ=?XYbI_ro-Z!OeT74}t@q3}jy@#bMfx(c z&$;3u#q!DH8WMjab1`C8svr$yXuHsbuUA}a2T?LCQe`<;lr^AcAYk|RIvoZJ)xq^q z(Q=E-THMVMyV$GE{oTNS@9*yyjWVC@f}R{sY-oNl>e92bDLmZ7#}{)GzfmoEEla?W zQ>f@yt@1@n*&mBRRT+(BWgYnc9nAf%F=F`e`h>sr7X%fW*V!G7$5te zg_M^^94VZZcQl`+g@48>3cx&zsvj|FBXnq?)?A?zP%q=N_)wM^TnH6sma2C zN%h-(&aTc{T1-;nof4k*HX3{n6?)Ql0IX}-+oEGi9{)}&`|4G^OG8oIzUQrY|0lWF zTq`dc(z=@*|Jd%S7(<0bn!leE@WH!-UukFd)9RB~0@lak8hy{z%DC`=|x4M`GhsNuKP!HVHt{t+UK#)LgA+u-u+( zZ%gA0p;swq`TCSTk*%r8$F`%&R_6r4|959TWt z#NElS$ow`EcQ9^t>SXj>j=Eji)ip_OAi(dRs`bCVmo9~TmIS(sbL#^{6vo29o0}9b zW#>n7z54S$%X!HdZ4a}|jjA-Yb>^wfFdq|;a@#i-2uuY36HFVq5S_JTqLW;8q7jf; zc9TW8O@F_C10@;;hH3AG`Qa!rvKaY4H}!uC8y!kC5*+&Kw~OcRVJ6Gpcopt{H@;!! zapo2~8LoY2;q$hIIFu37xSaoE%91Ru`Cmk1Arl6gkAH6jD};`(FXraA%{a#k6-&xQ zyRVgt#1<^f5|%ps`Z7MkatpniO197G6h3^wSUJuW;c+^ZNl#;^0miYAAS7=c9K($1 zKpm?h-ofM?`{&xOLmOXll}86ikL60<)!Wx0K`k-+^!A1xj-qqiViXF^{ikn=$^YJ&eRN1|0tJV3B0f&PF1`D{!$R==t!WfQgn=Js5AYIj=})@< zBwM+K!!OdMW*uIoW>;Redh@a`g={W{R%e|0Nx+|x_@T756tX=r-;I=i0rnsfg3F?? zvEGOH2M+gUf*#F}i?9Bk-&y}~uynDZ?n9KbSjG%OpN~AzzWaHo2x+8R<&Q6S-xW)Q z@ii-ZOLh)ddk$ACz3@(^DopiO68~Bv?KKP^{nsk{&5W$Ok-sJDCQ+>R^WJCom4PWPrVA}+li{un3J1o!To-6u;4(bwMk&ho;r zNluaRbHX#rDJil-?_%h`S?I0ELi_e>I@UJoSLN3o^MtE~(}iU`H|593rEc@A4!c8b z(iq9suVq`QXrbDs!fRGDRN>4fYYS(_ipsiqFKM2f#-L-soG2;{uZoywFsj$8J|^bL zO#-vu6sqmtZsic_Q2rBuX!T%xUrI~W!R>i@xhaGFZOfs9$FGD{%oWh`d%dhBKo+40 z24SzP3_spJpt!$;P+XQgSWg7{JhdWbKxmzs5a}dd)w&!Rf$8#j`#_#1PvQ=FQ8W|u zsapI2bck16@5ejN>*NBz99!%x$sw}Ct zq|%vYXYz6N8_p&CqF0n$upk|8rA5p{!m_P`on-wvr)jQeITI#~VCuNH1|dzA_8$SCWBd@)?l6Xr=VIA>)%Ar@;!}Lq@_2jmadj43 z=ah>!=%8`J(IRF{Qo?VDb7hfj`Y(0<@>C$rR>>dza3EK=b&AKw@Ku7}3MEUKn_mg< z=^w{V3xN?9E^Wm!8KA;GFR}H$r>{IOR&jlzpuuRjrw-*O13-}vQxlw#kzuY~q3O8R zi!UiB7hn3OM+zXHJ7WyJxNwh&X!Z5=lZqKM=3Lf^@i|Om=1XZM1}-KB{iH-nOea;& z>Y>^~rL`^Axnp);X_m**PkAK_-h1OlbRP_A=r1xxJhA4Zye>-#IGk_GcIGb9c4n#- z*x1-sqSi{5KHd~{wtw$gy-bFa`sCWrgfO|4_U4bTk4l4qezq?SsfCaBl#jZJQoVP> z+T&(Mi=<|&%0n9O1)|AD^J3To=SoHSRgAITCN2R_>HcF#vBVueSD96jUM=iHA#pNK zE$Y_sOt3#edw&SZTkL18hYLY*hKcI1fy-nJ%=s-Bv z*GKpCk5;1owIv}A&`n7R&|(8v^|w?!o>mynSG$!eXXg(mI-@Bq=C**$DXQxuO}-iE zQBv}Kkc?e`BnWs8d;#|=`nu%^-j$mDdUjL3%Z*Gni>YVJGoNJi^gi#*H|WkeFF%Sr zQTZZ;%@Kpu$Eyj(a=rmGXC?!2(9k}&q65}877Qou4o0WUQlPwIo__iA<(tkZ^1~*0 z(#}+VHzU9d4S@l<<~OBeRtQv2TItZ+=99QWdV6IYhC%w@?Z9;!zsAHX;q32b6MtlU zT#F1cnIiPYb!5delAH3Mb|%kQxEWT91lJHna3H~cBd==UTexE zn*{^*=Tn!j2$?2Gk#%hH+e?v~B_$;oxVSX$-n}!;rg3x|E7H}aprF+{z%zFQIL1OH zj6G|cM&Jcx+Uaw#3?CpN!4iC}P4PIXO{I2Cw7jN@P zdXR8@036i~lpNVO4o}&tj@s=~1U#xo^3_7@FxuAnObOVlQ)3_}x_BYl3=>DxO7Gbye5v=KIcc zr5v~bE5yhB^_2Mlr@9BwCu?2G7Mub%Hr@^f5Q=s5I>$c1VQRQD4XlRuoZl;S$?%S% zV9Tpj+wbZB6_}GAv})E}qQ+Ga7X{Nllf#C*X1zMv#IRe+L8*3&allOd_+J_MuOFR* zAGBeSKyP+g7L!3H3?sXM2eaM5(P1c-#@Z~XoWrOZYqu18-Sp(aAdKUqZO6;wcNfc% zf-t-uPJ1)COYmFcGd#_&F2RggrX+(BMCK8?uf>hczwjGcdzghLB*)DmYF3uRb>oZ; zv;GvGyio8@d+s}ellQbNkq-j-p*rr_6TlIxdNgac`@$jqn@*&qHqwE^Ajm-oj-5bAZ9{zMs@9kSnWDgSt9n%avgm4US*vb&1rYt>K8 z{Qsiqe-nq?(w}q6ur*k$5Uv6AeJ=(`77k&1(=BmNGB=ZMg<{beu;!efJG*S82O0Gy zusS+fy?(YJSr2pxIGPns`#Kk3o0x0cRs+7oqFp%LA@gW`Koj@iA5_Pd3LGCme=k4C z8waP*(5d7pM_8ygeGmf3-D-B4=LgV>K!~g3vWhJ!Dai_|x@600f`{k#q@3=dgy(I_ zc~@;8n9RqhS`?|U zPkp};Og!I^@6!}jmB+xdX8Kt|RyIz% z#u0yg4W;!v!b3RCAGTkRW)155Yu6EFh`5wFt+WcB*S~>S|1g`tL;T5C@=G}KnG6Gu zXN~Sh{d%A2+iW@N?q$Qhp<0z4#^9H$3BPUoKc30v5kyFNK6A$WsL2g82yu6_kK+=P zIf|wfM{0K52o!bPf%(L3lhMaLiKY-n$R&GwbEccC+fY9zLrh+(Uhn=x6kiVM2~?|8 z!**d!SQGh72Y|f8?bLvH9t@~88Q@HMx)SQ+dw-#*Xy=DLEGipN?#7$xR6xy?#YZ3D z5j&n$qzK&gwv)i)sZa;&2SIjSshQ#_l~l|^JkF99E}d|Y zovA80{P1Al6_7^X!GsPM1Ho*MH`GJrb1MIV(lf5K{{)#vJ($Hv<2^IsX{A)^-k=FEJe6z|rLK*Z6*giLy zcEM*7vNY6rgfoJdl0Qn$EaqzP-F^WksOVKUd+)`Pdiq>{`zwd(hj_ZN6QAhNWPsJO0q)*C?D#(w%A4^?}W(6U~**CXs>=82kifvGtA{TC@2Nm}!DhW6L`^kv)nNlt6MB!+Nt44_r+KU`Ytyu{_#v zwp=`*oD=f%t^v)$*xh)#8n0VH+p`JNoVYt(NjOTtVQRFJX!A)Z012Dw>C>MO5o6+t zz8T6^VXxbZZ2GBUeZLvk?Si$QUbo=Jk*;N`)oY&7Y0Ah3+VG@`L-~Q7U%*MN+-IAz zF*d_7VeD!|C%3)#=!flY^OA_WHG%(wuHOgfC|+nRrIm;@@z;jpOF)ccd>rs8Se?v9 zOd_2tBrGfj{7MxZl=5Z=L5w?!!)#3A>U8(Sln+N9T7AU~j215$#tZcs5WK5}f#JTR z3~7_mj`DeDu+D8bh5Hk}Uq4!iob0LBtvfQgD3J0TBKZtZ9GT$q3CvixJPNF&@+WEL zah%Bpl>J&?@~9Ju2m<6aR3wHmDzCBf;NTz#>ohz-38;V)`3MleN+ceTx*#(^QP-gm zJnPK)sD>}shrRUSo~D09$Osw+WU1kxDE)C>~dsd~kK{L{i<{{k4AjC+ydqq=cm%o6bGP7Ku`K!BbQA`eOJbr8d3R2(oXz4+7^iLP>Bm+i8a+ps@b|ib z?z(#R0Sw;3O7qE#Inot$xCeo=8)=@Gxq6jnAPJzNrr% zL3^Y{cCyE*7at49&3{fdicGs7brPxu`Ai*i}7pPu;>nM<++Pg3)a7^eFT??F&6W zmP%K0yh9i;tcHb!eSL*d8Vv1M+6R$~Ah{^TgI&t3F{2cZ zC`VJ8KsxdXpl?-mY9kA*y0V=1(i2lhWXyZ9pl8ABw1<@i#2+uTn=|&nsWMrBZ5&f! z>L*7z2hC#204PEmxP>gXm_m^vuNnI}KST^|L!)Zsefb`Px>sdqjXb3>LKi`%H&DCI zI{p?281k2(9>V2ZB;q3Uo$yC@KraBrNese+n-@mQAqmi-X4U%foULSq1X@dipq+=a zrR6?%E@mJKjYZq6z1pieoO9c!Ll4co(mPPn9jV!^n2ve6+JY(=LzC*sxC5H zX_a1TfIFR97-S?py2`v)79nx zWE>T8#wPAv{5d)|10=Zyt1)wO+UBdUHnmyE5n|j#E)W#ItxFKc=;VOf`PSjo@_||! zCiLv7f0uXFi^`Q1qrm_uG~E~0COSl8+b6O?Ck;XpV_Y$gc_6|9cels=7C+peJ9ci^2KrXaVq?u4B2B z^>-&fpYm{cPkbvTZ1p^!8+x=~OsSud$W?y?xrrHeN|@t{j6nSMxIs80wGT3IpNAk8jh6@**mT4-B|mIdn!r$; z$crTIQD5mWsAYhQM4z}ZXUZzxd&p~N!NjLA-ok@3jt<@S8tAnjjxDklbBD_lsiU05 z>9b-OJe)07njSkEE=rDc4(d%exQCvO-eez6EIt|xW`c?+2uKED4tb`hCEvdct_TtB zTIi|2sHYnotMu94Y&iG9>6Qaf0J~_E;3(>KJa1l2c(e~mwyMvCVOX=(bV0z-@ig-PF|7&F;^fRXlojFPjB9 z?iPk;p*bSmu_NTGUz#?a!1hTo#-&3r&2gKfyKJVQCR_;#jN^G?pY+j)*5aF&XXQ{( zh&Vrwrh5&a-bF0-RsmH+j1wiopsf@d5|*mpQ`BaRYv%MLfAw}o|8fo|vpF=9AzL{z z0cNp!*Y;m}7zR zSk|odpk`UdXhnpe__#C?LbYN}%%aL6R_m9*kWk%=GjVzNglWCCPt=ut7gHf?P?5L* zaA7HH$`3e+Fg|ivK_4@msj`71^iLH^tLgwMxX}r63_Ml?|G{Gb`E;|x35PrP^xFAA z8f%fK2l*FY>*z8DorUr4RkbKMB##4-O)ti<=?FBWDm4QUl#s9lHlwxhNCqKJ7m3%k z&CRn8%B`)!4f5ySxG4TuKI(AkCcXR2IH^zMQq3#n5NIKf8;hk0Q~7b2wP-=srG#8q z6=CvT``{9qY0IEVY5pYK&g@|i(7+J3A=KC{wIZ&@J|v-o+}1;yk#XkaK+`~$2nyQl zXD_p!(6dLv=(9bK){tL3f*}I^uQ?d+R#zUiyY}+Y_@p2#tp9l72lqICGgV)njwYR~ z6~FocbehR|FGuq2Q3RVm%pffM+H==>Ane6SQ*r&l6$t`CeFqq=l^2Ng7|6dA(wgRo z++;nrL+^4->Z;O|G=mQyd?}K9!TfgBUY=S(Z($lL$(p2*{7cEn$+v3o8{(T5ULemO z0L0pXTf}S*ONqO6^| z0xdeDIysD=B?8CbHx2fi5pD`C-+F1hfR{m!RgfR9p%H>{CTLVU-0e%aTD2`?SYSHQ z2eNM>vT%cN-@xeV?y?6sK@MgB8L_2{gylcVxCK2g`dlN>IN7~Ne%`P;WW75j5 zO$-$tH9ra#T1Xczs&PIrzgjp{UdY1fR5m z0erds7FFFfSoiHV56zRlH|GK`XrGv%ELI=Ov-FR!G4NITXPMngXK(9xSJJ2(a!U-?9lsOs2*RhO z(89k02;CVt2xgnQ4E0s~r)kcvK#ra)2e&hRy-R)>=zndMfBXzY6Z%x`?G29PoGekz zP2bo3ZjSgLNtHTpU*sT^B{VB|slRPEc!aZ|ul_7iz{9oT38?GQ8IQLEMb|aTj5o6< zBPQrj(V=k~9IS{S^q>=7YV{q3=EHpGU_#@8)B+fM)%X!QgN_yz&NU=I6C{6!;>Rc6 zW_XqLIgxhGn@^GHHorSD`zIEGtk^8pTRLrI5WXF+mvc(JrnA zqA{s~YI}b!WD~yELK9F2(85xnpqJA$fV-)_m;fhcVAr>(a-$sk)c27*U$AJ7OMvze zHeU=QCwd@)UF|F>_`0&qpkNOQcAPA!*aUP($?rpZsxAb;?oE@rtVElS9vynL^qVl& zCIw(k!nHzUbiRZ(_JI@|M;eszO=dgcksQEG}=nt;cM6N^9WdDx2_EO(N{ ztMxHWE0qi`O_I!74&AcOHqA3ynHypA>`>U~pmFMJl0cY23Ss3R2F8)6M33N z`s^3|-kszk%Mz!cxK-qv><`UrL^vLjH@Z z#eF}LxC)U@W1D9NZU|!6sIxNcx2) z{e~}~(&6n{VGH%s@%4FaOF;!STNnjxQDTRg3jb9mF{8TK;Gx#L?NI!++r{s~T+XkB zY>n{F9-{K;?Zk;XWBAtp#(1@FL9b(@u3T^z!#N00F3=bu3p$L}i>%`G#!g zxIbqxs4CdibGAFGri(3ZIM&fhiMRp_<^DJs)(>?JIdLO~Rv4>K_~)=&$!XCPis>Ec?A?W!W}+v=0a1C+dn=P&PuRBc>Jz9GBLNz>&3;IlezJ z#|)5mARP0<_)rg?2ZTD55%W3AqvbUd!VOSN#PT`qX^rv=M>Q_PHAkou&a5TZu?v0s zfNRWjX`>49MkC>A3lhf*2h?lmTsFxXb>$ITq{jYqwOm;hA~dnHvW*eiyA9**bw-x? zO29TeN14xz1gf@92I%vP#`RK?3KMYnpsbhL1OcT0?s0&7S#h(fwo~`F243yWmSMZm z{YU6{wQMh9PBydCx&hh4aBy%yuZ+`;cB>E26jz5L;y&c_uC6Yf8dTnX*#}Fn$`aE8 zex$gpBQR=T=jW-XE2_ADv3`tic2CA~LrL#r9npP-9ISUO{_iTU@cU%AcuCI<$}~Pd zbv8Huc2TP7vK_Dlfmml>HVM~~d?c*d6TCe)WM#qG{-g(As1fym1-4>q^Q2FFM=J1K zn9EMkHL*r~`za$?aX1~WnY`QPsUFUD={;>XJKgem?)ny2^Q?t8odsjr zwaj=@`^)<372MfD|1Hr}5^9e`BOy$0+8IDHepCmr`_igedZ*#K!*3WrKFEy!VSBuy zRj?mAAmYsETWkr6NM4*YZjh3vd>JgF?}9hId-St zXy{}W)|52Bzy>80vi<7F3y6PEtJD}lG(*E%sXdFO;B3;H_`g0R%6lC_y)f9dQ1^6& z%BL1hiTfz%?7nRQzyL;J1SIqXoaS#+lG)MBKobRlXfW(vh`z#GApXrmyp)ob#&o9a z1j7o;S65qzhDpwn0kSGpI5(|?e0{n%4ak{X5HA-$K`qLsf;J6SWatF}(t%f%08tHP z+DEu_P>X%@dsYkKtd}@?tI_&5K_S&7#aA2wp9(tT^80BqO5VoWg^yX6db=Hrg6-{P&KLX~=j|N_gIA(47 zd#DZIXyQ7~i?A059i(qS_0{c$)ZC?4E76Z2M7EUhRNt2sh4FO+so)%fv>)=u_I(9# zHpHL<3gn?Y6$X%QUUpv@qa@&CK%PQfn{S;0RsYJ?QN7pQt*e@p%tGhKRxkjel_;Y7 z?b}#Zw-5plZ%XcaO@uJ5WD&g|?Hue$-31#l!!3;${dJ80A7O6+73J2x55rJH zNQXgprzl7`ba$r+5{gPI-5mnb(kY=xOP6#BNSAbXhr+kVI_G^~f8T$tvsl9%oM)cB z_jBiUUH9d5Gr`wlFyghpCKm7st~;64DjBWxu_7XDBM*=Z;Z}Wiy-L~2x5+enpQRpe zXE-_4a7z!~C4x0k;gYLJu8&w{po)JcL|Lh_ru}CZes4#;h-V7Z{u#ec0BR92bLG8S zz~|$)T?`z+3YjNjR_E+{W=ig|$yNS|Xtaot$8K3(Bp%1bZ0mdDy_Ao2FPy8!O?kJ? z4SNPR95(_DlFdG`KlWv^X;p3jt?k0wHU;d03i8SKhH(*HL01J{XdWizHLzsRFJKA3 zUO`;xy5{y}rJDxb@pU73pqRmGMtfX+*K(q|dCxw#Cq+oFD~84V-87grMq3efUm*?54=BM zW=^Tu85A4)-6RlywaiI-9=1z{<8=!wRi>hLezwK~PL=Efoy07_b70=tzCWsO7dAjh zO&qw<(=5F}%|p&$M66c!bZtDjB?!Ivbv!OZ6IHruxh`9Hcparfe58#Czs8ksJNuTbdSUFQvjA| z<+Q3lIOJ2s??`}^9NS&vxq7!U70mmq+0^I&lJd&Fo=i5%oJMvvx3^b7PHGq;tvExw zc@6Rqy9cwI-$}XA`1Xsh8^`@(IC*{?-={_fHoHhi-wQn)dt$;47#nvKi)Vc#N^808 zDe_Jgd9PHl(g2)5 zH$a-p+J0zx_s$Fjlx>%MlKX-4*Qa3sbTW$$fd*v;q{AFBST7fNFq?0?jyBO?*GrAO z)1d(YlGgpVr~3YtO;4eAx6OL4I{|@cjXlt5&H~YkEV94EG9XnmM&1x+pn49P1h;*? zm)(2L0w~Gr)Vt04mYA3c}VqqR*z?FShP9h*72VHwf%jL6K;ncc`8gAKO0?!r8Ec ze%a-J>*29*5MhjADLcb|*W)B!f-R+Ta-GPd3$S8QFrO}@Iu8>V5RVDt&Mz1*OYqhM zpro@dQxVc~BYaC6KOw)Vuwb$Cc$4isUExFq8tTj4+u;9zULb3cX1=b0BB=m1gE4-# za4R9ofryPd?JTNSd^~cmk8REqoevL>8w_d&=n{YSnhA{|W9t45e$4=LHo26bDnG}~ z{rOH~21hL^AYG5Fo_4ka2k%MzGO~j|?EwPg?tT7|Z-|Z`H2F>-Eepv4fI^1KsjHAQr}Yw z$v4?kxjXXV-b^eNvWBZ`Es$ptSla%;f3jjQf93r^XeB5C3sX~n6S3m|_`1-wz=&ITG4G#7uEI$yrZZ}h&dTQakd5KQh5_)#>Y0ACb>17#8&CBWcZfC(U*`nF` zC@w`m3!#o)2!bl&F$I+9HVi1n!G_<_T;|v}U&9;a80224VY-Zf^W90?YiD%>9jEkA zTW?pg2M^xW5vNom=t>P0$jk@cCpCr17Sguy(>=VtPEl|t2GLKzI5^``{1q~wIGb@n zSs|uoW=t$>?2Fdk5+@)#xm8{yofY86iJ@>~bDK8+xye{1Pc>Vfchej+%^&OghS;;7 z^OPH+j3dNlhwSHfE@K9k5}M3%80ns~&zrUFt<4 z=d{%gJp>z>DiBU<6b=IwUz^UPyBJ%KX4g>XFmNS58LsAmc*lzb6^iiyIQK-MIdkmv zmou&h3Ph+a-*}*r0E{}FC2G`Y;!}#MG6xDA;v`(FTbvD`k5rKA1fA@mx=Y$C^wG`_ zir;`%_GGwg*&I#=j^Tr(PL8De9u~(jsYyDe6%i3d0N21yCG1W|m?bDaH)qMqF_su@_sc+_%n~tpolATmP+{<~;;C}x2mDJ`0 zn1;Q9^JVP)6$o)U0O%&gL|`*|_*#h3rvRK$rGrJN7sAvd6B|<0xc!M$kOOy{|z2BE|>{ zg2_rKz%qaHzI!cYQxvkh=AIiw=^8Gq%0$TdP&qw{vk+K!rbtMwWJ0^oj`q-}I$K@V z&s+Lb%c8Q<(zw1(jd*U42FOeLP5H}7F?TIwufy?n5KGuMCYR50K z3JMxu(~Ycl$Q-w;R-!-RxN40ba2s!!olc`=Tni=2z%zeu{y7DLBX^qgGCTSG<@8p8 zW$20621#6WZ4)sdC@1fb; zOdV?XL#HCG2%pig)^|$4wzl?U9T4_6yDK3qV#Vvmy`g(h$FeM|&o>;jpj$ zS?|x6g5bjtw%mNqz54*_<=lt|aIh8yraf^`=Cn$j}dCzPXQ-5e*Ty-DA zdq!LzZ!m##ofF2a`EE70N^SJr%SWF~0^XDLu5R|Hi&bH>%nzm6=K6Ahxp@-9-bE15 zi2d^H#?_pVBKxA%jF$Ro?x&N&(` zd;%9)<|hyD=c}_91a)6O|3rK@HWDU6=@~)3%VQSBmlCD)E2KDm?2RUASN2?ek;9Vk$o2f17aAXptG|$d&Acz%Rd#LNo9bT4=Czv9 zFA|%GT^h0To_SktZ4AmK-u!yE^k11E4v{LhRVx}MC$_tu?mncGE~&r!;w}wVmzt7L z_KfAm#>fhe;I1d$I_l0k0C!VDKYamnZ0XpW`kXc~0uazc%y1tx+CeMFc)$j4_WCTr zy_Yqhk>A9nJkN3pM`)g3oq0_e;itv+#s#@ANtiBE;uX}O%SebU8&}1g z!w2YeKloa+O32LW`OGKb?yA}Oyiequb*m?q>TQQL$7@Yfm#^)X-JhG)mq}|RkXHH} zfrR#|o0(1UbDu##Ohm?`JkF}6S9ANPJXX>Nu~lEw)MlxyJ%9RB6n_MOZI6)hB0It) z2D+`|acp{Kp%?<&a|sCO)aaY+ej(3%AtH{~EPY%o138O2EI-2`Jt1=1PbF<1CEs&+ z9mArftDXS*6#J{5eow(T-<4>W=Yf3I9-GVUr+AVJ0JE5neF?zdTs!aUsIC+3Qy#S4 zvV!uN^b}pYbthMu;<_3@F2}Ig2f`X}u#ZIA1M`!d(pyR+7-SS`Rsaan`X1kpmZ?7; zfJx12#&K{I%n=?L*?ax%uRyMH9h#0lQvhP%+zluPM8)fJbGqJ!{iZO$JMZ`2{d#?# z3<~3vbR_fYM4|KzL=iNh>;+j?&>Q#jH3rb7 z>>TMCo%#5bz~~3P-U9jjWamRY1xOEAfP9R4hQLd)0^D2^2sp*XDv0T`7TW~$7nxsBvYgN1!B2J6N@6p(vZf& zH{^;bqKbwj`zD*a>s;>N9Eo0trlQp{HZIk_eF_9SWsA}=u_@CwjJ{qiRnTCNhx5yM8@a zoZj)SvVYMXzGlmSBbO430qUh}h&RTDHBJ3-y$);+9UgS(Q+Wb3NbLLt8g(ZSUK4e? zq@w(c@2N%Sc-=>&GVm+n?;UZ@u}-Cd7adN8y+EfP|2kpTR+}P$bI$JfmChUBlPZ5Z53r*E z)rS4aLm~SD{c``ZwSEP3rk)Jt@Atxgl`$f;kgqS(e2^A!N~PQizXhd1#6f`0c}ZwX zX;#^qE@?n8JD@TVQ4tWgX#gY~Cw3v5MjG98s`f-c4q4xQ;kApy_tA7W7jd8!BxXtY zs__ehXlQ7d#^o!IcBfR{X>(~@qxMOfFCWcS5yL(B4l1R1zJrZ!Pj8$9EK>?4VK;zX zFTs<4joxf-N8Ewl1nuKlj2*)2^t#do&J(M?4YxI^DHGuT_UA)5Zl<#zTJjm6>;VW$ z%;X-BEYaQ#3l8SiD(^^4&CYaQmRxpTuC2n}hs$LE_==?8Tv0tL4wECuIz9Q&caYrc zjgHSkrVgbtHly@!D6qQ`an~ltwV81CATZU$9wdV+uD+X$ZMGbUatj;2KwU&f(PN z1az?3ROBx4ST%yT<``G5mr1;UUqI(Y{YJZ1;o|vzf91+z9?4A1t&1tMP3M3jNJg+n zIWAJcXrsv%6;LPp(#%>@FVeeTC`f~NMIRCSLgWBBIrqIAX^g|h%dV28#77seeDH6% z$f7gN4pcu5xx@V}ondjDiK0Wn4GrCo@bY_~^)Y<2m2shjRqqnsq5@?tO{a~ep!}K} zrNqwpdJQ{hR3u<2v+`~e3{i~A#W2%>nw?t6r1m0*#f!sL6(V9JD(<4y2SCf&iv&@t zjuEa^!bz%qTESpnlp;{eWP}K5#&BuI-netlUyD!5Z!69gZL+DI+g5jr_X#an)R6+G z#XyRhu`%so65{M6)2R6P+*&pDT;-;VACw3_vqnjM*LU za~ke)-kQ-X`y})b=u{WtBP48$b$xiyU^r3n%u?U`I)b#a67j03fEdLHc-WbTY$aeVed;qC0p(h?u zAr&A{==;6*jv$0+yLvc(0_qQRT*8{(<;h)ue6-)jh9RDMu+l?Jd;RJn3nqw65qob zos~sI=xE=m=k6QgsZ>Wtzhcb({w(x@=siDxm&0bKs?=pvThwIHS%B2xXBWsNo z<&sA-y?&)GHrM9rNki87{k%K2YWvCO(V^7(yimcO8EDdDoMI%w_)25#N-wSGB*9ER zo&X?*7x(qSu2*0)k&iMGsM9OHNc6MdI*xdHLr{BF6eBmSHw9?BjdstLqbv+NJg%0O zYak||Dvt4TW5Acs#dUxr&mq8=Qg#C_AxFF>}Ez6c1B4cFHB=bFG^ zu_yYO_&l`A9q2GVo8wI`JZ z0)!Qy4H0Hp)=Xzf-f%liBjqns0<3nHi{qzpQ4Fv0_7{03ASJOq#Q6BRg4)r8$ixq| zkcpd`VhvS$j}jOVA5cX^M&=31(>~a{9SMlqF&l-zdW=;r2%1n_o*6uq{$pUQr0pCQOL(u+q^P zWJeXh9pb-aGOUfq*G3LNmIWzWk(Pd@SA~XhfW-J)BH)9%GSU!DKjzo10x02*KddvWRKpI+jAQ{ToFI zbaNMCYUwLsfjT!qQ5^Qe#})oxWZY8>#9$`ZB^jF}a94(O{kbYM;7(pX0syOZVGz^EvLZGiH@k~V zNIbsc!ewm_rx-Oh$uYgv4RqE`=UaltyL5i6&d)XakDs#?$X#0RsHNXeytqT36dMba zUw8F^(p_##iXfFuaXTH=c9sfS3c~9sHFa(bg@oBzd|4yB>*}K3kaFKlCLkX9#2Zr$ z$?W@^MnBAiiLV@A%I!tK;4j~WnCN_Df;HQ5MQvxX#R*{)|@s)eGad1-Z%F}DYh z#R>7UY2Fv?|7O5o(1Dx8STb440>^5KY8L=%rDYT}zJoYh+c62o7f!&SIJQ|L5kkBV z6u!CuPwknZ`5(ISe^Hw}ukfK57_s2g=RL}?UYnN)8WtzAVFLXR*g9nL8E@xL{Ii$&KCS?Wk?1(U6OR190^Cw?atW< zWu^~7bd>QO_cXh@EUAMjo!?jo(XssOF%t^IL)d!rt{d?3pLZh&Lr@+A4hM5InmC~} z7Z(@AiU>60;{Y%}cc{cvZf%^0MbV7?d1ubAEs0n`*8HGNU7*E2Y%Dw)E%GwF>)_(R z|35cu2RWFSg`S8-L#^|TMD=oSGp@Eio_=GL#D2n;Hz&cgG9Tx7d4<@-SUyMa*v?c7 zEYw;QQ{&^}QnvD{p!gCYPF#lsqvg74>Szk5ITo`x6ed|Zgjkwd$cgxSeaY@=4T&52 zN<8O>O!dIJJc z7$nR5fSFbFL)UMu+6Ye~u%YifccbBU0+t2@$(SQif1PMTNk|a7P07nq&B*S;Ep6sd zRz8Mb9_S~?h|vNW!fQo5{A5U_b7XAr%a_{^C0^reB@MR_Ig^WGLfQKatIXJHJ=?u8 zNq81To&DcGPi6;7T8r4v9zyWTpzxig%p$~)J{Rr%T?M!Mrr1a+i#DcgxB~L_Vvv@l`yBa zcYf^Q$;KFQk;Okn&i?DD{_EF89IwVkN+!L+3|aoLM%G$B4|bitQ~{Qi&z5UlT&?S* z4uh&{WG@OrFvYpaMiwMipoAPMqM~xKultqRbjZZ4pELWsM8}{_TIkaKb!b3i26!cJ z>VFue{|n-RzvBgwY$TfV6SjBM^qdt%XM5K|%*3=pOCw}3$dP49&OW#ZdByCp8PfQRYNfmRI3S z#gzJ46`e&xhO1(PC6@2|B-jT3$NBzksOm)#=I%A)vWv>46!rAA zG|OUN_ic3c%|8UUv{{0*_>9^6G_QV2PN(7i;P$_J@I74pgN71N?9&f4Ys-WCY0nGf zoVw$(vge}`bS(kDbwPjc8z4_2A|iSd;^XT&(zho3aqOok;P`)==dyeK^Zw3cX6;oo z-aR-7r_a!)m%`bJi!`BEk&aBo@jU@NC`M~!{CUMLtyIM*xZv-|Rt>X?M5*NjQKgM7 z?5`{I^UX^!Ph*TLr;#M4ACDG{9N%+8#xcz^(D)+uT3a$x7N;^i%YZ?~p|hoS(+wU4 zgrH|lpxLM=p_Qjo<6cqC`u7v@3*u*mpBCSoXg_}U$H#kq#T%<~i^5lVs{c{66_FPy zEuvZ8#|i%ETFb*VDoPGd9%$sNz8@iaC_xg$tk+~CX84l={(oEr2N!%k67D$4&(HTD z2o!c!K7bX=Q>J+qIO|DL`LQvxg7~h30}`pSlm%dnszA{5+f8q|lhzg1!4LOcslwir zMQe7MNRXgoVcF(U{3FBjw@aFXqd3RPH1a!SB=B0~kg>BwYs zIbgH)<6uCF2f35v8o6gg{h4buF+>~_X|TTF_OpQzlDEx&191I56^@axN8|f&#O3p^ zUT`c|@lRLf*??E+dRFqZa^B1Mplw)D!J}oBAzNdzPjT3k?xzJ@LD#Q!R2jLv_$UiJ zWT`LFo5~Eyt@FN@&2RpH&VgSn>=9urq2KYGt?|!4L-A=$N!C+`hKFrvb)1!&CzGph zRbMlmTy|jQpAIDY!5PLuPf`$0F3?F;l#RX*6c$y7*ULXRzFU1=AUq!bpYh$lW4*=( z^nM@#a^Isb<;Nd2Vtd6eNR=WJZfHXHx5p6KzM+8x1|A+#?m$TeY+(`0-*&5_*|fNJ zp7#j+GLpZS>dzAcUJx?FU1)xLXXnp%0rao6YgnC>t!2=5ln3P;8qT4|NiWMUVD1rfkW4ja@YK){5WXF zo`=1`985kDX1z7Ssz%NN=%mReO1l;V=8egN)o!Dt8zm6<0R?3Tc^`qfwkD~7!Hh-tLz>yM8@ZXL3q&Sw z%5POprCj^DNPqqLEgW_jiL^g56!c`C$BfPX28j6CX-trQeX$~?G?l1W@7sr$QZc#4 z-`{vV0(LMT5rycuY7Yaxh1vv+`F_$*rVB&LMnoNN*W@H0nf{#`|LeebYQas0M(yU$ zNVV|+x)ENH!P^b>d!U=-hj=DMpv6jn!TjwgY_=AxcM@HH(H`#`Pf-FD3(?@lG$ZrG zV)7rx?QaJjT*EOM&cJW&?E7`QlmcFXgOP)oqwkr?zUa%o2OcSZDF4~YW{i#~+>pCV zr->UyU{!uMq)TH2u>uADZ1m54{_A>S+=R!~69jXPuCE$_lmi{_QEgUsjzv46j9k;+ zI)B&T7gahQ9vT)?eSR}D-Q-Mt7M{Y%d!Hjfz$OEO?G@xXdNO&G*E-rPe$ZDtD;R|o z>VImMu(l3Fek^kz6(!)NQ}mo6T(9&qUaoFk1!dE3->$#183aE7uF;L9YBMJTk#=K0 zEz@6r`o(}{@H^_XHGGc8_wqQjk*tBA$;+4iRmVL;Jo#DuAo>zt{O8lGBw^g_`nXxO zoKOz?bU*hV{0F`_sY739H=_vbl&VHYkLr-2ckgNB6)Jqu2;g!mdGIy&3g)#fysE8*}l+HMrgc?X^DkQP^T2MUF1GiIa_ z-Xdnz#=~4!qaY?fKN(jRNeM<-lgK`qaiK)V6_ul`JHb8uT$PxxNnexLoAc~!`tz;h zKA@t0;?AJNWp}J!=Qc}=Oo@O9g&{z2L2dT`dgpFP1ZKcWVOzC z_x` zKma5MbYi5v+VAE8Q43HhW+;h@Z7n@iRaTDvitqAU3jhX`nR0*-$j#dpxM4tGc<^oL znaJPvQj`I86FNvE>#9G8Wz}-4bBhNCCb%0~w#S zr);+A{7^_#R1`EnFfcF;2rY*oARQf4uLFdvARFM$AGW(4ZjMI*&X)9h-SR<)pqT|H zC#Ty0IWYn7q|n3JhLiAHcV7V{Y5qxdzG7Qj+aS=yRst|V5)s1nt>on7HP9`{1Sr#F zpfms8y(?oKh?l10<};ICr$SSxtd?yec&@lekD|O=a*HvCjS{WG(NUdz z1xPhyHwL?ESWBc38d0I>EH338XMbH_p`%+v-Fp%Him5#*$spHZ@>tbKB7R*hUx1H~ zc}2zRpQkc61*WQsFLXR+LJbocjY%q>J>6R{)+>304tjfua6sH(oJ{D4NF=7p2Qy#m zT1rBBQ~_L6>hpEhD4ca+LWQpB)(*>>E(UdBkCw&6DE_Fq?;6EFaV{G9oUz zK7S@T(JnSdv1x1qN(5}FCApu07`|4u?fRPWBorihQos$E+nPdm;t~G^TD}<}b(|)iJ|P{1}AY1V!cLR4-qW6l)|I z80uL2br?4#uy%Q~Iy&L8qntU}A0Mg|ca8?XBO)~#&?GWylJiUZmz7dyD|-Be9#6VS z&bgy?cv$0+(dy(!4kzizVxV)H4G7DD+4n0IBFjQ=nZF&Yw8`vvG`Fxo^B{k(Q_eTn zymGdOkW{(YZbk8)-7=QL=9o&Rn}z`C5L`fDpr^G}0;8>$z@aZc-90t`T;+ZPXsL$5 zf&q9P8V*PzT$+a4CP3WVcpRr5F80NGCw?CAjft8#YFoO&R0l>@R{3NMhrd}Vqeuue zr>LX-iEdPI!;2u}&&e9z460EC4%iVq#6Y*;HG@@3VaBU8^kjX_m(3Eo^?n~c5qk6N ztR*xx*-g!sRW&Qt)t=*d9O3U(xLi2fbPV?zbm}(UU=?kSnAfjpX83J!a3fIo(!3s| z*}XtxC*5qkK@KmIKN-rkRXzZ&{_)}J0O5@rOcMvHw`y;M@iM{h>1}8@d}#u;Qhsu>L4@`mLLmT(&DnAD7J_vXB==v z+B4mew0uDQ#n;8D4c#5_6~wC=A9q|K3yWd@!?x87t44`d2O_bbU#mKGcD%5SeZ!a| zt3u+(ENiC9jAs^n<(O;Iii5hcjQ=d?8)V0E?7n8oIG|azdMztra<7B}SUHj#H~Hj{ zIhOW7vlIf3MGm0#uUWpb`*X?v@HE7Xo<DRDU^loSd zz9ZWDR3q(0MAm9c$)*?35jRipb774}AS;;~9svh?M{nzq{1cA!IZr1@903k0Mv;b^ z+5(8Ub-4q#CKwNp%q6w$0PieYYSx=UuaMt&**eZ-=zeBugK4<_sox0WC>w}tY**#` zTlAl`g>_%4@!!9n%5GRI4O%i>tGaGs*c8I7C9c**Z9xPK;`Ctv=8S6v1amTG>RdKz zlFWMIskqIoYv4misb?1e&NK%A?7a&0tEiLSDkMsROW$Jy-3$OSzFW+SsRy_VhE`u) z_W{Z;6F}f2>Z0qm3O>|l077SwWl2fNDp0#i11PuknK}WtZ+q>|7xQWHK<-WrxqWw# z!_Uu;kd#y&kZH34qB*B)Hrm@8GM}0Zy9%5rLQ2iyc zgYhCi&9a)zt$(f-7Glr4M6z-9{qUKM^NiJo1Mepr9wJ6|_6)Uloc>M|wAJXc&JDEg zFCb8%nNYl54`fVw7^a*t9}IdKiTfcfhGEQ!kgS6g=<8PR^4% zsW(2@XMdn7E*!3OKzzEh`Ek|Np4P7{JZ+6>T>@y&MTOR3=O z8zkaWlzV6fghWI^Ip6T>!D_6>Y2-7|(FMgFCZYIzSn~9)t*w=H(=zYGtw-lLXiB)V zc%{EQ>w9(4a8+RMA%+tj8ylSVT1og!{9M2+Z=~B1Jq!TKJ^A8WM@BR)6G4+wR#7nr zfLMcrgEK%hhG)K~%mJv(X<$%B@LoLt#m^IRl-mM1GrZdL8O^$s*jP#U90`w=*0wfg zL|VYgYWZ&aa0SdXIFgIG_c<)iVh)DmPCtS1`2eVWkR(fxwCpH#e`b`RqY|liOtAmEKwTNAuA#0ezhR4Bpd5Pznvg zGM7T!3BN0*o~1rHd2s2F_~bz_<~s?`hAPJr)<9|@UY&$H9$!*R6&;sO#eqb{gtqZxutt=0F%Aey216#wv}G@q^_=R%?phj_XbfV zqWRlXFix?oA_iw4pL$=;rSEBaii+rSlO6`N=)?8l5P&P7WxoQ@4e^LhfcDYaYPg7r zj_x;jIV}N18HwF&jfn4Ug;+A%$C zpgY1Y$K`lB7qA9QJ0T(!=_5E)U>(S1^|^y7U)jj2*2m~3+%|%zvp$C)K4d!CJvkv6 zzr)Aq=RLIYgH0JM58hBwUs zOrE+Rz#{bi-_j@;ikEk-{#r=cTNYAYSCS?|e?1+I?v`7l;>YNHqBd*u{9#otE;VXK zo*xh2(^1&kgZHnIZfb`jpkba}xCDhU#Jqkj6BP8=o`y*Qe2+lvov&1P8ayfKax@6D@GKW&r5`gZL=wREM#>~d;1MCXP zGyck$xbWQG<|fi_&4$<`r@NnU9s?i;^};96rn**76t04uWieVv_8v$>BBSTNY#bNh z3j~7WVyxj!-N~zZ`#wJSjxyNnK>mQy4Pc_?b@mQwY!>|az_39(cwVwKRi(BG=E$jA zar!y?he(k-iR-H+-vhmF?f1HC6{^Q&KW#p7eAQN^0Bdy8(Z*`#hb)<43?q{*Wt{XaQ zlP8!lFboEBv?s&df#=6-(r(THYI1q6U>K#2S=S$di5+K7s^a3eZaPYltoPSvcXTux zyrrE^W4om}aOL(5)$M92!2UdIeKMrnUe6xs&qKGa0TYV%E|t5k=U4@;verZvgZEbR z00%3>38+53b`qI90!$|`81#ig(oKF_TA*wtug8-aZsQ*Yv;cI@cgUr(aSWXwwFKe~ z0rjDIdF*{|N?9L_n9a?n#fvBu%bwxjpQvMG&DxPBHR*6HW*EkW=XrD?l9NjB0guBh z!trzzs9I*{IV&Th$xT&fSA`}|MZ{r`7n=l(E!o%_NjYF}Aa=^7cbEds&CQE_8^i4- zu(gLkcQ6=>e&6Qu8vrsf3JV*OzTvZ>-h0hR6enQg;6U`*H%?%#8Ics^rUVY(5dYlr zzfSiPJrLNGvpvcA-YG(uT7Sj8HC`-|fMTAuI#~7I(QC?Z3#DWbfe5OdfPjW)!-C;#>8O+k>($P)Bnrx(CkCtZ0fDK3h6qh_W6*g9|O28Pbut`>T+d42CSF{wJ0xqH8b zaz!baCk8;`L@uP1%Ec5%mT#IgQ^`cv*{JSWm&1r zNwtQOv-s;OL4<|Lhq!8auSJw-`1+$p6%<5O@^~|^uOZIf`5k!;RU>eeQ<50yU@ab; z3lAe93IgFh-uL#~K@2IlI;5v9fIQLq8kgu+YfDQ%;2*cl!DEabY@*4b5qB_~D3C{$ zRlhO{c4~HnnL-ldO01-p)`ttCV`Avul!!>I1iS6pGTEx& zAiNHr`xU(X>sbXahx|R)H-^dw2+lcKyqdjTb*mN`!dMAwqosJ5tTS|YJl6C?^R2`a zhO;=ep|>MEeSFBuA5W{Z`Vmm1iteFzMkgf5k*YW8y)CS>7^_>zHLz$$y-D>P^UqPm zB7kCJYu==%r)NG|*W|Rhp3e;rQuQ7Al>k#x6HkmA!7`I{+w@5CuEj3`wuCSWFv$nBG2ig?D6~kvVckXCD zHk@ARCCL}C@s!q31Rh0gyIt|0sQzDvD2*Mt%fm;Nq%Kn_im)=$I_6TJFOEG)fTnF_ zdt1Ku&AHrH@BJuu#6Sx4C|8T?(K^swj&Y}PoSElS(lH~his{iq@_5QN$A?@udA4nC zx_{*>jmJbq|LwjVFmln<07madnDzH_houQXxMFb-V^O;Qy$oM|50q0v=qMLWsX&=T zJQlj$Ju{O6urbCKA94=m{*pxow~wC^M69#MM1 zML%^a{nvW@d5MIwyF3MBlzU7}~t@wIggIggymF$?GInEfl9k-q7E%s+W^E}0j|^BKvO78h~R*ws&# z?lnx+zu0;+7|xY|;rKWhM1%95uYe4+ZpG7t4y>l+OTbo+PEKY!Ur9st@bFkNJkpr! z!3G1#m;nX`Oh%4c1x7|)UthjJ^S!D!)9Lka1NQXvbX2{4ZW5qv8_V&2fB`KDh=#rb zwOq>cIXWTWyocgWXIY8c2#*>Q*Dq^#zw`Brv2Sm0!*enxtsREXTOgPjnne9;gzs4l zY7Cs3eOMp#u1@gJU0SV-7c0V^=1P6!+&_9hbKYvk_YCU>)R3yL#?3p7^7 zUc7t$7|21|$^e7E3cx;H!py&|;=k%cutse}1`;g{^bpQt+Y;V$S$zvl$XD0P6nyEq zjRlcU{>P$}emX)TSR+X&wY5_gv!3jOcS+n@ZkLpDTPzoZ<9THK^J!}Mv!=qXuIe7< zb2i@r0DYL8ydgIZlf!8MMCnc+byJmuj-m(M!2~mVp!PmdYK|izDH*&lwF4-a(RZ$C zQ~{y#XuY>tS&R;a*8b>KaFE%s^A#zlk!Km9K~W`#dG{DVq2+QssMmiqJev&ot?_(; zt??NtZY}Xf7-}}$X4{phf2k{cA+jK!dXv+@BLy((_)Uauxe4RE9x~&M8Pwq+DjzK; zLjki37It%PAaUHJ#hMM4rZS7M1k-51Pk6DkhKcL?dgC%AG zX><&*WrGvtmN8~mmnRLKBO{ep$53?ibT1?{W;dG`7;0UzR5Px%+WpvfR>;TW z4ERF}pdzhd`1XUB^NV0VFp?l&qd?rw+}zxGv$*}xKsVD4WF0w&hlk7;(z3D{Aedv? z0#%f}p(<|sRb|uE_EdmO-W+`iLXrJ!H@NcT{E7oeGq^%QxiFvacs(?n=RpTtJ@6(X z@4n5El3^Q-fPfzV;?9mzQmZfK)i=0uLKi+2`h**g%Z&G~ zT9T8J$a1YHhB@A@&y7@~AXBDEr2%&dVq57l?W$9TVByRz(##e7B=0{9dqh9GzMdbM zOwG(3e15zoSIy0`+r0zme|Q`()5-0B6o<^kb7gMaeNO7+=orXuYipaEl_fspB!i)( z^!bhb`xD*1`Nna4z_S+v0`t0)w#U8!&GAwqoKeSAAEJ9Nw^`3Sdwc1y)vMetD#o;M$~c!LlOj!kI(An! zkl)2m7IxDek7FT)?SZ;9l%UTj-4H(h(=;wF_zWC&yPnNUgPnSGkFj#i+x08meYycP!TmU54uaw; zW?AuS#W8B`16x#P)Z{JeHhhS{v1GG8#5B+g0>7B;CpYDr$o5{d+gg7DxWYW3fN>kp z{`N|NH>R<9aA%MxiQDzmx*7z~7N7jDwRkZI()WosAJ5IrEpBZoB%#yZxfAPnmHP`ff1u z#Z$B2@wc@t>F8BBdM_76A&3h79e1>1^6~Qzz?CMr+2j-NDCoSV;^mD2(zuLwouAIvM%mq`;?;TG93cYNBcS$ym`^h@GiQL5D@b9o98`=B$IVj* ziXMI{OU?mxx{yZ97mG#+hU-2^w-1S&5{TTMh4QyIU9+k?BDSFn)#tNAHJ{6UJokPh-)P^n=3+?a9%8J>?E` z)<&O+&q0~&@p#6Wg9GNFB^4y8a~2GTZb@}_cfV)eDnbZnV|A!Ml&18ns}r(mL?v;* z(U(AJPlTN^%ZjG&I35A#erl=NBwZ|r66^Ezi#+(65gB`*Z`X@Q+!{3qFm0YZ7BN1< zGc1`0w6()2yU&lG(NHWd2|HFrGpXe$gUK1L+6z(bJv!InG_g0lma%R8PVJzGqQ14c z8O`6df_1i76$*-&kGoRf`qm{M3Ytx#$d*jC%F35absQ7$`?%BDI)ETA_92MlzJS$l zEzKK4LyKeuRZx(9iWyYfQ(?#UYx14?l)l}qdqnVT%Npx1jtx2_4m+`W_hgWz@R>Ct zKMM*@%NL|3!fTf>P6Vh0p0%w#CH9N1t{$Rsrh5(=*&ZWTC4MYP)>d4;DqURlT;dvh^M+^SnuV7 zbQlBZcFN$WLK}0}m-tVT!LWPluhrY#-Ojrq9y5GK-f>a0P6w*wq@2xm9~9@DE&(rd z{xygQgM)(b+?<`9f)GN0*`HtH1Zh;qESELRhmkZL!XeJeGVzQJRLwbzy)0xHkqAU7cRzE?L1 zlD4}=?Gcv)e+vmVaA&`I?ZdokdQp_3~_nw8h9GPw}h&VJF9~ zQ^EWBwKW;HbM^ZXu|A64-ri3HvFh(z4KtD2Ka`&Jn_{@ngPq1ns}|&{(5i&Z{&=$D z{TOq~C*eO~_CIbOH`fx8^+08``PxuJqTbm)B0vAxwc8j|#1PzE*|ehE%6xjh|0;6+ z3H<*!dU!Hfir~$(extVj`O1<(eF_)&MUL#m3wDGYoCOO83~rl9pUfw{yX!}uBY*Q5 z|Mj6T11PX;?CJ_>N>U8wri4{hD6(WkP*|fqGB=N1wnLhR<*)#Ce6u|{mgHZpz#sn& zT(Hp*yzS>r;T76L%FL+jb}@~S6T&3g_!JpGYA~i4*gEaJ%>U=7fSbJi4&3C;kp{!< zzHV3XlqzkGlV$~QrBBH4$18P~u3^Hf}r;#JiFQ~2B zKYC~Tc1B1*88o{}!hbc<)9R7Wv9C+krvG~O-Wkk-WKkEIWs-mOm_I=5zgNhJ9U|u74~tEKlI5N(u4QB30F z;BM*o6drMT)h_2s881rQ^iQAo$7!H|2wh-hkPqhQl%e2Aw^v#q6GfA;2pC?bZ&gkd z@B6F11WWbmagqwAZNEcptzMp=B+U$)fKk7V2F>2vLBqUUXVn)m7++J=KMe`y+%r6j zcNJOU8N?gD5|7%y--Q$VK+TY)L2@g;vZ_brtny80+p{tXj)wYLdV`$fr(FUf*Z$K7 zc&-5_2_CM)l;EIOxmKyK?Dvcz$pRjr1kS_gwj%=s))l-4xhX-J1U3!LdTXwxerVd& z*GF*I`o)Vh^65_b`5Wtkj#;+uhwe_S}XHJW3*z_ZuSS7oye% zmu{)+I?E+J!(Ws$s4gq$$FKSC4g5=Z@0kZesOF&*X`xUCY3KBf)m2J2y-R#k)1?94 zH9rokr1T6Owfp-gC;0&?v&6VX@?P)W=`#gV`^|q~P+L>b!1eRVgqOuabt|A{Ju)Vm z|L5U1p36aKi z6ksD5si+e03*RymlZxX_v3h#@Ab~Sx$p7kZaFGB021XO`ip186+DazobN0H`m^(*l z!9!)N^IIilf+C&`A3N0NwzpMROq#Ay8a;vBv@G<^a}seKRM2SU$@^HT-SonC91l#A`PQYvuW}SbF@RMOB0Wh z?W#%6lTSe4&CnRvb?DIBMj#oUjSy_Ffh!?`!<9RqGh93kNeBc*2ZM=|NDh4Np zLl|jUy+OL&-Xaw;_g9?NkH62YNASoC^&sgoT^#Ei-Udc(n7(uqb1^L#Sm?cGK9%z} zp=o;AC~l2iVlumgZ{O9&nIVwcj+vRh5)n=TXf!%F(gog(SyxJ+J^r}T(WNHDD3e~n zwKyrjAM3?`oz{Q+_8&9d&k1?VBr`8(>iQB{?RF1OQAf?4XX{e6OG`w#-ppB6^X6vD zuMe|bi?a|pEOG4$@y^bjUx$ATh^}f**4IyB9fU-c7$Ra*&eK_)MtNKghlGS=YNIkx zHYS2v$&gYy_TR$Z|N2_-Rq%)C4C&p*oSmom#)v{Iy$4TfsRp6+*B*|@*t?ZBGzw1z zv6Ymx@U-VvR%E^@xDi6*6aPQHzB($aZC{&C=?>}c5T#R;l$H*W6p(J|l2SS(q`Q>v zmu@7bBn1hPZWO<{aPM>Wch0?c4F7P53TwS{{_+V)M@RJhf)ap=QRgqNs?vFm5lykc z22V)&@Y(a{xsFsK|M?;RPglwuU`$m9WRe$E&cil1(uE5>-_6)Z}nGiyr73G8}AM zlcJPd-Lf+NfPVJRxMHG?0Mu|Xz{p48H|IAM2u1?J&vagISjYeH(ZInw11x2TkSHSo zMQ(XHCo=M7M08j$8M_A=(xL7dUsF+0aK*A55k>uBY#@)QsDbPm#*K@U(@Gau0BZ*< zOb834f~K`6Kw^UV_wdtU@eaf_4)!+ngWzPshaK<_qsiw!e;0K8_Y4FM{yV--vl}Pi zq(fb&71)-j!&N?yhV%V(sy#S3i0||2Xn+~5peUA!3nc&l_!K2!ddMKtmaDW8Z*p3e zt#BzSyi^!sFpJ2?2Z)V?GNPZ+ey8@ooWEh<8DtZiJhKd=?$dk4v88~&QbwSwK89qe zG%%P{S0-A{mG0B|X(qlKUqBrWInbwuZ->irz0FX;3Z7N$I0bh&6D`$W;fx#qq zWip2xw89(7o;3n?{qC6f9<3O+u*lDQQr>YtYd_WNs< zy=vA^_0c#km|y zdD!%{ye+3b3ObJOY6dH^mYLbX&>m$huVm=p5PJgM80PlXspe%2p~0&3%ZgF+Uq#{> z88%5rPq!(FstH$i0X{Z9joI#JYMRR2oM$(@a8pF&iSrgC@+N>0gwtR=cwlL$>`3|V zOrG@y86YMSL{7fXyak&k`ZN2b7FJH`lt+MvnvC#K>$fsYm*P@Yf-%^Xf|6iN?gBP_`V%KSz@F{4K$n%JDl7u=ck}du-Ak4-FTbM2=jXu4 zqKv&r`fVv_hX5eS39*^d`41G?m4D5ywsupHV3(oA2eJuPbDUPlLk=cT4*k%(1b-*QoGd7P`X+4~p-ma5<=Rx#Zu;PQoH9dYjuQU zThgwP{%0JU0g~sKd=SAYEo{ic!@Dt3xy{(uH@jb_LovwAd^5t9C_P$|t6srgqmIz5 zBh71-NNSx1Q=9b|&a%hB%_-rtP8~BUs>o?UQQB)biJ8t$Q)_l`PjJbYkMfP*X}_r| z7qr!9b?B5G)Li2dvsYoY>QvNg>RMdPH|5`z`KujMm4fv>@b{1j@Mp!Cotqm61Cjjp zPt6n1nh4Bo@l)B_F27ND0qr3r$NpMACM5MK40_=|8(0F|*J6H2b#?~I%y0IzA5H39 zoqIQry`jr`kfX@`g7}M`P}ra9g_LV$bz8Zm*7k0RquQ4ctB5zk^5d3oGlf?V$L$G0BkBclRc0%s-P>xl@YwCgU@;4Y~IMr zg$)-4HNH*(07NqsAm@tRv~o4_vF53I9OmG_00{+Sf%ToYh@6^IL}%+;G#v&@3PDfAQbAfYL*evN7mi1T8`b-xS_e#brXO^|KC0@}a z{@XpX3#*+k<&_?n%+!5<8iT0{$?tAI!t1YlmGbhsJ%k(FMSjd4k!);kOf(5ZYDp2f z7YDN0#^P1F9gb6g06Gq}w_ANSLh;r0_DW}GFHJ8)BdXWTrH=A!QJfr*XO1y2^4l2* z*|@(8jpbxz>DUrgYA@T{&Omr^WP|<=;+BU<)b;TQoa7IZcGfiJliv(n@A&BGWgt=s zn#k5JYVI(;QX1sw=({9)4ToOqAHE~(;E#kl-XM4!$64Z!ZdtsyyQ{ibF-G<8@B@=Z z0)y^SB#m?t_lk+KHhZw)%ogtz9sz+2nC<2Pcb|wdVjxVH1@IWDdNo#R z+gAWluh;G?20)Suw#i(EIlz-s6?ld8-1j^(djaNH`QO17w409sQ0xgwN#&qTSAU*b zarS%w&?iYUZ;CD0zW|mf9W7wIqCZP4BQm-}w!ly#pWq{O-Z*P}?9D!`VSUNaVjdH!Xy5r&& zMQ~dUnLI^6J+sXRTg{yGT@K!t?l>y3dYO|$BEmvx(*|I74i3jRE?nH%sNN{=D6ex0 zrG@&R%FXrVxl`*8^=>v1@HBbPR#~USTS^;M2);r&QI#8fq(vkaU-Gz+JF)#cK@-4|sEQBb%5HAeonnehKz3uDkoD&tye@T-PT^hNm$E($^pz5At|gDN1$Ga?E_%%5Pu;0J|(Qm z^Vq`Lvs20O=cvej9vFpM)f6{${VFqPRHyuAf_&Q9K?M}%*Tzt(UuE&9tO02^F& zYd#%f<>Gn+)lqsta`<9Ou-KAWOe`bQYll^@+9LW9yW9FoBG9p!0I-5b04k6N1fpze zV%=XH-B}ku__KKb##wdvKEKA|veFd)r$|+YIuizV4(y}Tw_gUl03!+ei7Y5Gme*IP zw%-<%&q3rj34;Ch&W6s_x8rlGFWr__Td%K2{NhsJp_C>}408Ql#N6vpB31L67~kPV zm&Y(?$-s{cA$k_bLTI^pH5}|Ry2L1U9mtfy9s0CH4|apUZ|^xByzZZ%{q^``YikCZ zT#hX|kYK&^QXX%#YTChY@fc0duQ3KgkL_E3#p(#zbz3R5$vReaqSnq zak6CFUnw>U&DFdbf9oQ4!zXq%RZ9H8SP2b89!Wb4P*dC&7*E#4IC$_SJ)N@Kq95ty zVpIO$8VsJ0E?y=ipZlf4^`Ov8`X7p9G#A3y-3_kIyBbT{*9mb7Q+gi&CqiFe-+J

zFJ2tMvvKZZ;F?>*tAwq3qRN{(50zQeT$}(V2PG_>Y{?m_Ujvgv+89L_p=*^!IX=zz z72%1rx5{zaYNWB0$iQp&e+GgT&oFv9O%{N>7~I|Utk!rl{WQeySuv7f5wh-Uny8OH zFLNKr<#2f9)b$4Y{@vtMi3S{x7jC~&%1iK?dk;o&7M=v%Q5@1HAjWs6VmI7NN}Bw- zA#y@wRM9muqKri5@}orW_e@8J6ukp2GjkZw;iJ!|z%KKE*aAL{YhS<0q@xG&3pmgq z{l4o1Hl-(kz|^m71;C6FF^Av+SnU-W!p(B;EL<689CBY72 z4IkTL<%8oRj7Eo;6aG)4^dA)gGNfW+g3(FjGJcbRTc}g9xJ<8BK~H#GN}N+%XzLXa zr=!aX-W=(m1InQSt-$<_{Ku609Z!^-BqHZs#xFHcm!s~6AyX~1{N1X3=A{+=jPNn ziaQkT96qCA*Zcz-cZDPvWBOx9*2U!0Q=btBYy>PiSO+?AP0pFCwq!rwEJM>WFIeea z_PypD2p}Bnd65W!MG;wW;!{BDzg5v$P*|8vm#jg|vKr9l`LtzM+nbLCH#!x4tk@}E ze*hcRjVb!KqB&Do=f9T6hy)I$5!3(xsw zfGaP3KcQE-LuS>AhI(5pf%&}CM}7eW)}`xp5hV4Lk;yJe=nDy$Ef-fDz{FC7Ybe=inR_3P#p*>!Y#c<8R=V?*_gMWn2KeGw};tQ}cl08Emh3ims^zio9 zaB$!OJOQ0;ZQ4BQ?GE=t$)a|T2She)V(JSP#v4K#l5DAD2Y_4`hM$!+L)hS;^r*8y) z-z})Dv6*=ybeDvraH>`d9F_TsF5cS)i-#KiG}-RhS6^l1T|M9Xj^}x|w=|0u)X4{E;r{o${!(qHH{u`+Bf&=&NUF8;QcFOJp z0G5K{FVN`4pUFvHPA&_i4p#om+*fj6Cu1zL9O3^=(O_jrZ8y7ygG$=JL=N-Gs)t|fl5#+I z2F;4ZYWV}Z*hznN&FS2*O^t-Nm_6k3%uG(=sM=}Y*lu)cj$;9sT$ z!l5cPG&Y831KJCLr4+!+;Jt^eLTZLHw9B!MK{o2{z;#-^LmHZpl|_@ES~q;B#kLP9 zr?bu8 z(}-}qF_|dRWC!}m2f>&}?PUP@?Zf}0nV8Cj<38p!A58FSeD3Qje#k_^%*@UCF%-lZcWe zCN=W!MF985VkyWk3o>}3j}OsH5x`7iOU50aK^?uFs``6k?9P1%sI$LPywm1gDPr|= zz4)6DUKIIlb?<_qxte<5VF({>R(} zYWhuj5V*LwAdJKI_O>)FFK;Zc3Q)~QTe)U^n#0}r#b)pGd&OQme#_$Z?jnwtn00~it$LQ{G4v;$xMXtc0L)k?o|S>1a$qhFlL|5Az=NT zoo>ez;Bnv(h-t=}Ty4yWf%lPuq45TQFb!_k+12{-D~hQG2C+6ctaL$$rB)y&3DvY} zm3!2bN*yypvSJM~;!iU^0rs91lFC#~`_2~%yFa1rCsLiAo{Z3(E*(F9yP3?Nj(Ld4 z>T?UxqQnh4ZwD9;jNdG#ak_s@A%hMDA~Xl2A$<3rV{d;?On}Mg55T^%aQS)R*aSpO zCK&GB(=0C4hMYMWB?BrO9aa(SmfJGc8HGb}sGk5l!%i}(!DhY~SR5yuQQWsx-I-@g zcg(G-Qe9wmj7t7)HC>YEbOTEJ+m$Oc3h-}|+`X)>aUPOb8(Ue;jomZ6i+anjBxZBy z%W7uL^9fG5i#kO_vVCKZiOC47xxK?+3@jal_+KMqlVSvb1gqq$1(=aoug~Zk!Fn5! z+-<67V7Xr{Y{gDRMRm_Ins&=4G^J}w2m#C46dDTq3OU7Q^5UNQ8#T^8o zDJm+GTd>}k1xne8SAYkZApGYTz`?r)*imRl8iHnwT(s=xvh0qX>Ysc(L=f{k^}WLn zK1WI9+W~|PDBD8>Bz%VN-k$H<&mYEy{OC=F5)_Ijmpc?SOMk*wt;tIOR23l=6&I2R z8TKftBOswF&{38_o(E%~du1IxmS2H3$qJMwiA}4Xz~K7zlT1}0KyRmh|tI`sObdI166r>`5ucwaecd` zmQ=`@^!Z<9gE=OpSWVg!%0+Ib0q*HnuMj!BJ5#_i41_-x1VitW!V0mga3AafxhG8( z=`zp&%)b#`u(3g1PA7Oaip=}6v$WSB+UE92!N*&aB}05UQL-UZ}sWe^Ixhg@tIn-YPgTlt-X{A8mX&=Z&&>xWbud3buNJ$i)V z*zV-H5`YS-58Wk!op?k^;H2Xf^=U9(H%jSl}k?z>>DdkrkJRI zg~}~-wAZ{p0U5bpeZ4~oIwn#L@OH^f<;^pnDqyre0awse!tw!-hT;t_^Eiw zSJGqvuCOh&`Ul`|vWUR^W|#hkQB!r&Iz})&jsMV}{(528W09KyQlo8rFeE6q%b#?*pKPC!o=KDyn6@3{cy>Z>hPj z)FpH$8O7Jzo|fs*21*KU1q=arP&d>2O%?2>3(zCH4x&Fn)^J#WsQ4R_;=uEajf;!; zM`=@N5RdAg1A2taW%N2n@k0p=MmsTd?{VQfWMsWIQVR&!1v^5L?i3!s>;5LIRpi0A zJZq6@C;g}RXu|Y25X=EKk943SZsWa{!hL&YrqXV2_r(mLMDV|tp^4tOpmK&7K@X2#(78O_h{ zx55lqB#eX7X&YQj6lC@OyTA-diyl)JpoSre=#h2C`y4PhW0-DQ1+OW4VTnQ`egNh(W0+XXhCddgyaw?8z}CuZT~ z%$K=#e?2le^w_LqYuu6s3@`O&>L2~a;(#eyfdlY53;w}_dh+Pdr3}S)3jRrBX2=*AN3sXqu|Xo+(os9f zPegT$CJ+M%nXYGfc)&pm256lLtM9$jrZ!7C(fB&6UnxwabGubvDEbH8K|px1Klk}w zGxDd%AgVo}ov)Wl=NI{aho?%<=TIOWD_1z*da?kZ{c zdG;wvewJ@ov6P>4sKxe$*UMtIYnhjBiN(-0olIB-0SS62Lqp#*V4h&5-P;iz?S(>s z^1B(MzAg0IuMDeaY0NA^g|zAJX3i7!!U~^ZCo0l=;JdUUTwZz-`UyONJJ^>l)dxkW zA@83z3tvhgC*LOZ7-z5G`QtOypG4E`F0QmRG1AF*-azXC9>A6UrYu}z-#pvW;#1sA zs5ve(uUCi7+ooagH>m}B-|{qP=`XeivGEq!GrDfy7;cp~ym7nai-{Iw!*;ijHPh#y zwDFNZNZ|#}el$jd{@2TM_K}{q2)fN62tl`saQl!|C;>U02*O@+7+HcX+E-6&UbN1Z zeEad8S}wO&T)L%qcY5f9mO!5;^p~-iViHri_C|h}*}V2M+Su427dwwS(VTC3mN*-z zUl)tOo@XwZ2g9H?_RGEQ4*ACi$_9VaOibeI8TuC@%miNIHw~T=?Qao;+uEW`9v})n zpVaVb-~LcV>NMOB!iaA!knbi3vP48ime^J<(Fh2|`leqOU(rrMA9P9$jEME%`{cRt z{cYYIiZ9bcB>5&Wku_f1p9r~pygxps9@XJ}y}SG4)G+j)^nPKabrSQ6mwUo`D?GQC zQzy;8v@|Tm>5*4Nt@7PtXj3Fv$Mf$>_4YAL_7Li#$z#ho?Jra)o&AzXC2s#M^it>{ z5r##>MNRHeatnrr+na)x66a58Qh2kEu0c-S3q5oe7jjH$4wvirgWG?YlHng&N+HI3 z>iX_t7IWI@TNgLYYpV2v1UVl|H-bJ865&hnXKU@mek`@U(W`m!_^sHl7puLHz*Wlb z2m~h^rU11Pj5y?zx2*(N8VU*utNoUu?@o$)74(Mci*nza-Rui?M_~~K`6em9@{4FF zRwD~weV)T+hQ7P8h>w?batIO7GZ_Wx@f1Q1U1bVc+_`<2X~KhJ zoE+~*5*&36nBy+%V_d>+9ymZvLvv^QQK#0G<6a?JQ4vEBBJ?3ekyO>YXYIA3;#+2+ zz1@2G>Tv`9U$+?iXOe~f7CgvFmt*K5w=0G+B}p;Zo8N{3CK&A3@GuuzlMQI)b3_fN z^e5w$Fpo7EdawOheVmya-*+|{8&m?rXfj;FL0}u?RD~jvDnY=T4DSfI2r6|N*{{8B z0^1lf|9ECiT2-Bx%9<6Er+?2lF=s^*ev4#R(cs*&;mQ=isegLfz^nV`)GG&YEnef< zzZJd2xGPkpfl)RqPhvXyTAwB>=YIY;3HXK!2(-`eAh6sTBfnC4jn(uMj4VrEotSxGMcB`IL!alb7 zrn$r$eg5w??!O=Z?~7NY4Zq;kO7RjJFM?oRfVe6a0hDr|1PscvLMi;Uu-G0D_sW*s8fu~IS0e7Q5iHjRl9u~qBtmqnp2sn9b~6wBGz zBp6#-N`QRfsn&{!o5>RNXv*<0gY>(uXMe6O z^s-5r0UrLLb@9OZ`mXdykUqr>dd2?^nFD-jaPn8eY&bQ(W(6-Nzszs{ykEFbZ*P-v zA616IK!s@Grc3SK$!NvB(+}7E;L^Dza6U(sGW4eVCKV zmt-Hmd9U&Az~lU`uwtdWvlR!jIH`u)o|qwPOyKouTVy`R-JaCOgjh0m1VLi1c_od) zFOOwV-KITDt9i9+P}C2Q^YiQTqNqJhpMUyV$ThaFST}J%ySN=5-Ze1i4?{Klx^d&| zblgMieu;8dm|VuMd_g6iid>pL?0j|dUo6_GI9Py~zBL#(Lm=VF_o_z^)P`a1F08 zP%I!G`L&^e;zLZ7IM!SK_@O>f&tvpPK@}_7dv3rZYi(WZ^t&p?1Or3Wd}IX{7T9Up z#55wpD;%mV(G3!oC1;}t~^BCZ8T*FG8f6s2h7OUtR#kcsfBwaBtw z)$z_P9iRv&0Ib)>05qvMFE2RL=S148vouz{_q7*^oh=|&-Wxn&eHM29@tALd=mAw?_u&f zcmE;58Hm_04!gml`yR4{86_z6%E6snxX9^qcz<>BR;gzxVJRcK`NB4VAb>#UdWv*)BeP7l9gvvn}NgQ1XX78O;vS zZ9kvo$I*UGyDZ%F>vHczeTzm(_R_J|POZOM zowWW+&^2gGdgzUlsdjd%6}mWQDAfA=lYDHxw2+CU`7aBtG5EKqd)cAL6r^M2Qp$9l z`s2ehgY5%Dx_oYX8pxrfQvpejK!Ce3()Kk>1u)w2)sSm!Ui%*2R(({8AIe%y{bk z@^>*|&5;w{ou`EPM)4Tj?T3EU5{hSTC0apF#~54njQTGo@*C=8tT3HiUDdA|HS12o zoPbJ&B#Z+YjFgl=eE6W(H##H;y67>mbb81XSikI>*5tBHmo(QuI5^>@EJLlNsF)ki ztfDS0EiHqCd|t^I0Q%}NAO^?6uTh(>#pxRP^6yOdkB9#Ix3FQTs6G{D~i)QBIS*1~exLKg#aON@Gu^(w*~Q_W$cizd&Q-J(V$qK0is$i){PKGji@l zEZXfwdL??ru8q>rR=DPBW6O{1LonDJ6NrixM{WS%TGP%E8B(82;m8yg%BDNvMCODZ zgFy45I*LNfpYrYx+2o%`$Fs$l0y~-4a!*))KXp*`SBbvr>H#qZa0jlgCcrqf*cMRk zA+qi`z+h|v@+~w4V+(5Y{Uz*J)dQ$tiO&mESvfceyH@TzmX$@j;tR1oH6LK!l!RS^h z2zd67iW(}b{^O@NDL}wFembM{fyZ_dw||H`?@P;(edN_;1BC#Oy~=|P;nO>4gHm@B zp9KviU(J5lWFR6bHM+2|8C}*~(y60-4e-j*qy}v*uj|fST{-CaliO07rO}_DVVo4u zA_~u3{Xjr$Nfh_IBz!*9kvo1exlfTW^#0ZQOGf(FPPotoV?zS=ow8v#)Jc5Hf)SS% zW@cl+Io^)QxsT!h4i{E}0lir6HebjPdeIq}5-syrs9IX1FbfV&T(nSP;`G|mgPl|# zGv>&!Vb@kB4h~ zSQ{RhP*;#2%Ck9Stue&70_u&#eL_Lq-zR$&$@s{vx3ew6W@Wi%2qs@A?^J#G5R0RA zWl(>KdsocUNGLNg@$n%#Yl6XvRRnPJ&uHPvY0?&l1UIdGvpw9kj(-VLznsGTPV&hly z^}RfjuWUUdZ)|8_Ne0G_>EgdmG|Y`FfLV7OnBZn}SjS~%J^`9z-N(SP6LBPkw`=Re zztC1ef6UFz0cJ6RV|udr;n7i!@2@L0pl%TEBPJ@!cAEPG24r5W0U(af>KiaJTf4ji z0J~IvKOLXt{2;gM6ZCfk*j3Y|>+}6Wpm{bpG?WVs&wZ~>wq<}WMoBOKn(swZvXPhk za9EJ*dR$zbG!Ty}YGD@#2g7JP<;x0KaMgGmnjRjzfsInER6bY$n~J-CaYO}f7FxEkm`3~#800SYTQJ{qPNz)Q_T5#MQW(d`JPq;Sxs7rb`tT6s7`}}<` zU~5o#I($2O#FUbdUZE6W{LP~nS(b(QPEnC%fgGp=CMNVzI6FGdTfUFJJn8#rx5ky0 zH%!nJ$okn{C@*S#FIz0p%TVh^ul~Kq zR-#Ckj5f$5+0H14oI^Y&wLH=%s$?Hw0f4OGhywskS0TU~Ed?2W+5Gk`Lb4oqM{eCI zSXl`M3bhdcjFL;+#XWe5EP#sq>TJ%k8y+F|@!QW<&-<~tC7ce|dK8p}SR`8=9?_ke zVEFQmew_!{H!ps*c9(6S6Kc0`AHY-IZ$Tz{+&An3_YPoSjcyIh9&=6sL_*!1rw3>Z z($62ovCRjv&UynXM9v7b76CJ1q5u*~Nt$FpZsiRFTAwmBxi&&13}e z`(FbzfM0*}T0G)CHWZx$bnGvX?54aJY0%sK`C3I=(PV(QQx(-Wlw$gM%nQ=hnhC`z zP(YRp4-c;am_1%9Y{KH`wXYPCExKNgE>2GiKqvALOi2II1SwO_%912PdSPjAZ>Q1s z>OUh4OaWPX-aW^&z5IdK82}Ky!zQ_(AgMN>9IfwshWH!wkXlzd4h9CDuOlRi7+Y+z+~(u5 z-j8=Uk=DNbiTl?j<11H;^H1FKjX=;Rd1n`a;7J)IiQLhzVetWee_%u0aq9Q<5c*ik*7O%V{{_c~eQ26b_l^wTVP7$x##c}%X!As;jDRh4~e*a3C zFq43z`VVT}B+kl-^=$ZvY&*4tL|im&$Ku7xwd?mUNpCV$In_RR zUaNV1JMI$a!UJW5pqLl>^9~twv&elxm?*~jz(pdnwtz{6xe@@f6Ws2^F3HxuN z*;!FU-tfG>hpXP1DkTZ`31^GK=uS)>d8dNp$_F49eAg@PG>5l>dg=ye3Yf`8};|Of>x9@f)N{c-)?o{bgfM*V#I| zwAzxcd$W%oKI~_&Y@u6hc7L!i9ztv4U&JH~hSlK+nRGOP{^mnTBZRDy!>XlEITPRV zaN;&L@CCn>A|)c-qxBF8K*cBxK?5{7M_sIJnH9sJ zZ)5?lE~Q5>NY+>jc%Jb{Auz;jqrpUQ9-3FdBqLb)7O`OHe@wpqGKIMHa`2F)iPWyu z2}gQK4_}&s(B+@fNg-o}W0x7hs#>WWDcscbv(dELK%j-qDkv;8f9QU3m+jm~h3^)% zr>yOJLkVnjr!TY$<6-Bp z3q78dM=6VTLCPJfS;J7#xy3Ta-DUoxzPR?0=S;w6fsY#AgM19d9`P(Q{9pqxrNe-f zZ1EW8Q(ELBT3ZV7R}Ei)9GV~@8fjHkCOOJ8_)gO(QeL|77=&M4n4Rkb@gv!`vsE2R zh51Mg^n_^AHdeIAy(GXm4Cp*)Ws976;JrgOBH2~O#Etnq`}{3yM+mVx4bBXCwk*gH zL0x88(m{Y-i7Be7voKDyn)(paDrS?t&Wb=9oAjaZnKZ__4AKcuTA?ex<2*wp>XPXy zibikBohR7{%77sN0&ykUTV%kElTfS4Bf(oN8@#o#iOEK@sw7My6w@#(QXKlr73QCx z!9^^vjqnW~D)=QsQzID@Z5dMS7@mWs;w4Iu*l@5Poswfz9)XXae!$4z!dhlfzqso= zSHkIZ__QG+^psuEK{d;htCqkHYy9!%8P@Pi%U@Mf4zzxAqG#rvYy~WO4B*u-h`$r4 zcu7EIKC&#*Vi$1=y=%AtKJ&qpYDtzTU`vzO9!Bf#N!KZoo602eU~IthC2l+-a5q zn6WHTXpuVWV7P-lY?+c&o%vP^&F+;?oc0kBXo)`&9)?!n|r+?*)I z>h2?qru}8o0v_wHUoZ`XaQl~H^`^}B<>de-fV}-HdNm&0^9D0uMN-U-N zmntq{MlDu@J%pssA9}Dl7d?K<*nJ_up8|Y1`ZWQA{_Du4>EWEXQZJCD8#xAk%lwmX z@QK(od8+Z$>B>pJX*D=Ja~3=c#5?Y`QvwzBfTPh>lxY1x@LvB^jVbWXEpbS;$e1l z{yezjyy)EB$>k9VkEyJ<=McZNXC&)&Iw6ss?#xuIPb*&>#<643w{r#!t`sN%K0<%{4bBTqUt!m_j9*bJ{=j=Z+Oz=`XYT-yCpLUo|2}> ze^n3W9LCMTz%XX}?P#Oa;vFz>4H$mB0*bG1dBCLC{Hy005Hw^9M!|qBbO6~x)8pE9 z-NUDyK>DLPh)`wx1;k0&{j4k@`IPNwJVu!DynhJP`xKCPaM#OGF5&<`+U z2&)zNT%S!;J(JM|42e~-og5cHY5^CW83+++WIu6Gxt{-Aro&+Klq(iLe^73}P35BL z^4>I6i$;2+2>YGm*a9w0RNz|{Gl;a6N|zW=E%F~P9X#$GDcG;+%{#n?Pv~-Sw>(t{ zblV-1-(gGJmKn7hzI#xy`uU{7u$c>F%LR^w3UfnrPF!0m?>H6<591=!*Nk=hqIo5p z_e90h#gZ>JT*5xrFSCbas$3_XD)IKD775NZcveo86IG}bT2T{t8~I*8FQ0q#mP@%i zZH(0dF{wMd3%!%j?6O2z@mL8sbLY_`Jmr=3X!x zt-_%>Cm9(aA=%&ioTY;C;p5RvvG3;W*3IP7@2rA!dR{z!L&=L%zC<^y#7zMHUObpd zvDGOiap0lZ{?9N2owR@MU9$=2B~vH)3xk*&#%pU_>0(FXJ@qkpmqiA;*| z-$ggVyi07x^gnnNnb9D4pbA_6loQB(YdL)EVF5e^qoRl=5CS5I6cm1t6Rp4JM0e~d z^x@S$3R_Ry(Hd-`Tw2$}n z`UkNxewRPP%$xR43z!n$xQ+G+czZ?{ADa!KRZwFW_O2?{JBWW!L6%&I5Qe3t-afIA-DZWYfDTYz95p4dBw= zDr*HR!|NMZqLIQrYg@E#Mv!p7B*^U1q6Qq3WFX-uC?5;(gM;9*ZGhG*7IgX%*b+fv z-Grcn&3UmQ1{c%Ey=*N{-{W@-5=e3Pg_|YFp1& zy_1>Q>r;6&B(nc0=@+U>%3W7m@4|IV`8fo=+$*j1`0e96^)NPdJHcPvd;l^~I? z3QWY+TC4RD`oj2owu`FOS?AJ);x`VD-%3{q zes@1#$~{`$Y#(eqCeexo3j(=h?C5kNOL6oVNfc)Pknf<`oJ`MewT!5$;yI@DWd?>W zJ9WW7cu-eq4##A0Ql%rnfgS`t8hP&fOX@if(bGNvUl+ZyB?`)W8)~1y2tbr=Ke7|8 zG8^mogWhUq$cRj7Rp;;tPk?tWIQ0cFIWMM5j)1zA{6d{wB1|e$6Prqu<=|+Ay?V(R z2)kzi1uO9$Wy_%?Wq|c%0Bei{Ao({4B(ThGeqNRsqRE1^zyQb?>P4#Agy)H=30UlG8$-m&D_N3r^}mhh%sGbNM=RcG58&2p zc6-5qbM`WE&9SRlwz_xP{3hr0zMZR;*77$&PRe@~#=7E{JMN4>VWwX@B_0eSwx;v@ z$2{xwUQUw<(?QBf00!0bXe_F5ncOI6-#G+I68VRilP`Q$h3BfQ>yv81>&9)bbj|5&9yU7d{TBaN zxAFE~%Zpcl>sVqP{6eS3Dt4-9)yRX*z8BH{&U{X8t}Ea4*;8)1GgA!c>QEy*-1PqV z8WUk5~`D<|?G}nV)3O%eaKJ04#mUS1=CBi%3C;T$-;iYAuPEKSNv2 z`ccumcT@^$bV5!&O?PIrfYrgWPE_rUlg4!`lU6Kw0(N02LM?U`*F{+E6YK@baeH-vQ(AYio$1Hlv$NGrwLzU zceYL~+@ZD)|C7Ec7L;w}jFDcFKI=W(eS(&ulkc^7BH-~R>F*lxU&W$7(o6n;ZX*8$ zQCv3_ge82aLs6poo#)bi&BgjO&wtVaBNRok@BZn=;$hGie07s?YH4_#(e$}!owHFt z1Ded&MLG(JJ<@|>cMZdGB^M5?;D-{~!-nd#Xi0v9u7osMoQLPKitdO041bqHcV=`l zzgpel*HTS3PGI2eK1=Vh{wY~oA(4eanL;a`nQEJXy6w2^WN&_$lNOmggaoHFt8*T2 zb_0Dlzr{dOl=os_?`XB#k~ObN+VmX~9~pe`cdMnAM!ZMcaqom$;6Y2&NgMLC9AE4Q z@Kk<5+UaWL%%~^$N>ELZ7=KN)1wWV`2((Qqd$ra_u!4ctW=#etpV(DUpI{0K9{T-0 z_d5cetdL-Q%qh49g8+X#4KEsc&w8S}z!rc~qkf~}pR`zh%1lJYB%V#Lry5Qd;-5Qg z9C8CG|8v3Jg7O&xS~-IdIcp#%NtTIBmkH=z)6de{`pYzn}nbb}?B{n^gOIN3j_nYhLnzpd-mx^bCzV{}-L(5hzd~%mn71l`lsg3$OZwm*|9er&63%Fg$2O~Dw z0&5Ew^(1a%E|BEqMOnbqOA7@1yiTE%kX9vFS_c5hYub5)OTLZ;MZ%|oTCn)jhd|Y7 zqVCu<2_C?RAo@UW#vIVJ?rYs#e8t(2m9oU9xIcX1H-7e;3{4EI%h)7tE^^sI?}I)i zxjP8CLagGM6H48I13U|j?$$7L z4=prww@QxQZ}JNj-`1?#tuRfoZ6Vu7{DusqgAKn*qt;c>Eb6+Gur}H1?Y(vkgu{2UH%&uhPjx4Pjj=&|bES zFVp>?Lw?`!=DIt`;;&P|5zs&Koxy3+OP~$FOY-J`Wdu~itZ*aX?tHbk|JjON>2aY? zX|#Oc?rKpF2m$P|5`s~8MUqS-hJ9rmZvIv9Jb~;`39(fE6elb5%zbdpJ$jh#(G1kfUBQ?XEnxcSGxOJmrzdY?bV+y}vV zLifd6K3slDTV9*b92>~+%m;DYJUn9sWz2vPdrvi!Y_I7%aNe8=u|)SU_P0bkLGM7k z3e?>!A6nSwwdA;HzuGC49}HqQlIx6=>4_lvQ94iA#XivDc|4`#wBG+La|krSEAzBw zsKNIxUMLy+y(Xa~3#JFa2S@yI`&Y@n{d6PRVoyUjD1#$r<~R8LYpC*k zf0Y_Rp8GO$%S(E(r@`(Rv#~PQ30ZVvqXExvjhfxj#@Itd5_Cd=C3Y^JKMQ;|0$vGD z_R($#Vah(}btoeNtj`dzqkm{Ot3e3C{oSajw^8`dSV~SA0U3ihX>{?bWF*aZJGGPIMcJ+W`|VvPg69Zb z2opRg$u?u7RSG;yfvZHtHK*xDX+cQf&{g;JbC^cz^fg(fT;+&6HVEK;2gx&|ZTkDJ zkRd*k@4SOUa26Wk<-sHAf#=}w{s!;IAU9pF^=@BbVl z+p)>sBU@(nCQ$(ll^7_FQS(YZ&s*7~&WLJ$s;4sWYC!AwdsI`P)rkz8z zj0R7|=d3eZcybKdD1GfYgwh}orXQh-7|jy>L|!L*Ngg5Po_UmlZ&+z&y(!_sM?WJd z*=~T1^SpzZp{4iwpQq#<_UCLiPiy9s6o5_uC?vw@k}Vo-TzOssi0mdEUhyehb$ne% zU=9z+>&Z1~u$R9f3QM8a zkx#9l6ub^&hdop#DvvRKY0UVBkg?sBBTO%s`*f=GOL&qf@;X2y#tH&1G27t;P5L+? z*;>ijKZhWVIxk^MXvFPpTd@ZfM@sb?MB(+)BXex_rY1^T0_4735X<#`v%$pqG(rA` z_D1M&zU*!bk8k^&dVdbu;hJ?Eb?JH%o=_ml-nbc zW$1p(SY-BXDPae8Pz^&1%@|V9P?PGz05Ls=^QeQ|>tzbZzdnTS-6em* z&Rq0+d7q7)9{bY*i}IuYZjM(BVe1rT{_@4mx9UGN>#yJNvR$oKc66P6p6h1+zM@3* zcq`Rlis=HHy-_r0tCjPM4pk6a`DuPvhWLpVVIuG3bY6V54xOkV(LQh@Zo*p+f1dq$njkA}%J-!$F{PI4V-oQc<=4uB0!qd7f&%oQ-4u zXMKfd6NMG!)y|y0u>W0OK*;j2a^&|By~2-fF7)PZmH_;F@&VVEBAgpH8$=h z^epSm(8nHiA*!XHBZ0df2V3*iz=O=isNjAYC^{T6NlFsd_;Dz ztP^`h4H|fb)L$~;C#^a_C8GfvI6p=!tYqqYFbC^Dy2e7{>i`iuS!l(8C%q)s>3w2j z8Rlb1FDYcztNIr7uBM4tZxZ0>-F|v+O<2`dqF=*n^wooqRlxB!rSZ3|ZH=zpBasANIzTEK~*2)Z5 zcbx2A7T<>ZQ`^DeNl!fh!zSO_;Tqcr4vOybh3=1&hexx*_!-~0>|Qd1jQ=42V~SSX zJe71dwW*tsjyF74XOCmY7f5^~mZork>6Xss%(Dw1*>g*M;Aq0{f1FZ14yujm!fv1@426IOt~)!9Pj+(PI@5)&VdvL!v-dCr$kojwcN4%uhgvqZ-2WjsG7ZWyJJ{& zT_rAsA;O}+Q#b5#=P!AY_O^F@#bs7i12=|YsqH5+N`A^*o@;={C!ZJB7ML`rzmt?? zkhCWA9Rl#($j%qv2GwSn2S1U$$Xqr$?F*O4JdL1fRn8!2$4(f~jtSjf4Nl(yzD`huvtgMA zUfTmxX|rLA2QS~uKp|XD*I@FMsfL9(Ll7m!1y4_-g#A-i?MMvFZ&G4-bYF2qOa+x& z89b4RAIBe9Gjq+a6-^dnB%ec2-in^UC@w{ze41s<0^pev3y^o*U#$b3UL^Ej3-XE$PGQRHYCH-VVzLA20xz|Ig04%0T240r% zIH0r@T#aR#gdGe`$DpiS2%7aZ-tinjl`Xk|oLO$gP%dG>3+iDt~Dn*}p>qMM? zu>M5X@iala2nY5BPFG(J^TGV;Fw*UC5$hA#|g{?RXyt(|f++E@3nVVtxf| zs+4*zAS&yi@||zs8K4#m33|)658)FXqELlSM&}x4F9Pk(&v059Mf^;q*0-sK4I3s* zjGzT|AYeqw!9l zHX`dGS!Gsj9ptgsW`{<6(mI=ZD5Z3O5;tYJWI{n9<*ENLHE;c|?WmkxAIzFWH<3k^Z<$_M&VmKuvX851r zU^&GV;>XBMY*gnV)>#ueizSI}BVuNJHjEIjDp_?SyDmp5wEdQYy(~{P(Y)g>$j{2s zs6JEtj@ap=abvwTUL-f@=W6<+pH04vs8b9>mCiU?Beg5~Tp{6mJp0Hr-h~@jb2iat zax^Ub!|*J4cn)#hpA7dre|ZC=h=)aZA`Zz6E0mCxhxUNl4L$CKGV4}9z24$6U%crK z*`TRuGzFMLQ53TJIuN;F1RkRMBXAF|~@J->M|hOyNPuMVMNN*H9?X(vzgY55x) zL}@T?MJ$uq0_VMnT;hg~&P#B2OBBiW`KEu-tDmUbw~2ADojY`|yvG{(cNV~sP5cAG z#mZI*yvv9h+QZ-|3`|?!#sQ+3b5YKOw6g9j$potW2e^9N=^|STDi5-< z_m$B5wy!3M?rl6jc#-16(Yk#&hmBxKreZY{&Ivu*Ew{B98H`?W-h0-vr0@Ehbo&mR z>DQ01a=sA!^MEQMipZpH2EEcb|2@osIOHiRS@QD#C}bRj5$ipbgN5Hmv%a=;6x<5z z<}qvLSiy0`E#xajbGu4-C#S>w!Dsj10Go4`Da0e9hMUh+3bdXg!i%gnq(l*Ci)PD9 z+@})Rkun=MLBD;zP4Tmu7v3d*7gde@I@$>6ECVWTd>Z3xOnB_Kv@!ZX;lmHnG;Ten zsN6&-^7*5#nCGay_g9;OCzSvz3N<6G!2l@=W&Fe{z;rnlV@aiZm92DM&d|i66C=s!-9m{{Vpfym&wyY{*%0blFtnjCAabHDj@{ ziD8X#)5_g6E|lgYVr7B^CKeyMi!Nz*F%U%0ti*i780chEm*wNSE@S^{B;ZX;eL$0- zk4(gldr$%oWqD&m1Tq}kRtZ$MPhu!}&UuR;&d;DjhPR#&%-S)TlB2w$w^aV>Lp9%_ z`_gx_@sIzfFV)0BupSW%USbaAULl({^!}iGcGW2&6SZ%{=RYopuIinhuu*Nl<3!?< zyOBJPl&X5iLG_gP+~T)i{C~(Wy4Xk~ohTgrN}ldaX>&P)ds4pL&xDCwn7tr)23oJ5Fr@X7phY zC|6f>UFri)*rPzPjL1_@c1JZs80*))mScKJue?V@6Y2m^w|NdZwpW_e5SZFt3f#WO zcWd{fRznI&u&vV}&q`|eGgd9_T)}UXl`)T9O}DsP%Br_CR8{keaTBEV?Tm%^VHnXW z=$4xYF-+9CyFk*xS6gDUosXec(xfo4x|ne>Cz0-Tuu2^X=f#UtvQs}F=i72r-Oh%T zp&{VEo|R@kd1e>?v0CpAIcLUH%Q&s5LjsyHA*ZLmfOIyjv9e_82m#Qq#JmaHJK-Q8 zNawHSUIK91bcq^54d6}T3jNQ^haTU5>vzIVQ=r<>uxaOFyO_R!ogHffklvHsNTV#? zPjdmgo(!Td0k|5p!DiX`_4bg_VL!#7S~;fC?ZXwf*_RihM|0k?qC32Ce2`z1-9_Oe zC4<~%gpQwCpS*lG7Kw=k8X6F}7`b46Gg<~v9!-jsKLxRtV5;nH!pqSYmL#jV-z>3K z7?)kUOyVXO@#3R`8RbLgOh&ohrMxj^Oxs!i;{#NAz{Dyw6PB zn1&RkXNqh+OK0YkP(91Hid2xE5OmYc z-MZI_XtNptX+TnMN8{9InptfN*!iXVqFv#}MzWbPq4Nk4H-3 z4nl9`5yxoOc8@C=>A>v z3ZdSWayFUG#;@<2u6RPsn+5)B6Gc9wa^7io@wD5qiJCS=EUz;qHS3-APVD&avoE9I z-wM@1yGO_)ALr2%KO4i z*qCF|wNk06shvn(LhLTz$2!F!{Ujz2!X8M45O5dfdI2|Iu+3QzPN$k)5P zVS5mNrq#z=e*rym4Wi(tA4KKmQNXiRz)rnK-kCHg!T93B zwZ4tJm4=^5W6ly3XtQnCIa|Oi9X81Atu7?q9BYs z`LGw)U+iC^TcQR+hp8qFVpSg(_fbM+&4K$1Kju92$wuGQR8{dh^nb=H=|sIU3MeDx zUnn)y5%F4-x>1J4Uip;f2F(!;DmY&wKv-o-^$uY4$T31E;R0_)Ock4ImMkc@E)UZ)SCb52mUUtUlJ69 zR0F!y_wFfzg7l*2yq0HK`>g@$`R8GoUl*ITNSGJh-?C8a+E?F8&-<(@*(re&^ylxk z;KgJU=MGJ8Y4#Z12mIqmlZ`@ihG-$M5t3CR{A5Q1op_c}-)3TCXeJNenJ(^B*Od&#%TO;iB{`- zcWDqz-n~3pGCx$}_Rrs!@>Pa>=xW*d!$%iV3t~*sYk&E$hk-q-+FU&k%p#dO^&iG0 z6)%g&Qa7&s@To>XRqTiqV>_-os#vLlVtX*SY zP}Ax3+*g3g&2occ&^4&+b?&Q&b+P8nF>4<42vJFfu|Gw=jbq}W64ri`+~Rrr%S~RkVu@@R`*W9Ki4KHs-BX)^v#C5V{;cif7Vp4%a5=d>lu@ zi>}XTrV=};`5-Ph@biQczSh_AFZKdYIz?nDK`3qXpnhJ37L!%k&$!k`sUe{{WI&atN>?u%!W%v}Lft#5yeO%(&evo32idXQ|)hX0*_g*&2ItVPG+lg%u~pc- z5C=SC@DD0YnfuklwrGKCj)29}>mw3wQ#^fMW|lYmGvUj_StI)eKu zDUu_=+F0Ns(g(WMEYoH9Crt?3Q(`a?;*xH&3&ST8L@DQgyjm+Vu0Iy$Hvdh%_=J|%9eIgL^X@p_XklX zs?Y8@DA@JlQPn?%SWc44-!NS`g@ZJAq)}Hf`AMu&o;W#y2+#NXZ~he)zC2O9>VKQgogCdk`&m4@K`xn(Pw!G zjgS>ty}bGC2l#03$gs#LB@hM0b_RvMubD5;#8$59w7~3EU?Do!#?PWdv_WD!3LQO8 zaWvzln^GROa8ruf_Y*x2+9_UL08z%5rS_Md76h^iUl*Sn2!8{vP4#QH`mG=txUhx=c!3k6q4{&|SwjaT_iytF~bd014FIrZ@{wdX# zvHWQuq-L(=#>-R~$SQk{@WP_q7hAlokHCld1T?CNTA&G;{vHid`0@vg31L2RJK;}F z>+GyZnHA_;Yf7&aJ`MNsu{JD&<#MZCVVLOykj`j$JUAft_tgHsY*G}_mF4$=AB?&& zb?RXSGo4qp&&r*=nnN~L4h~gNCU5WBG6=PzitS%#a1N>QtiBJSuaWHO`WW-i-V6Qv zPDu7)LndMnt|GU4YI$C`P4hKLS0th+fXXpRdX_SFEAyN;4hY)OC-qz94LnTbHWm}9 zE%ew|3j>%;4dWFtlAc$0i`jHB5`IEjvs^q!JyU1)%xSb>bNKk9a^CU_8xf{i1gx ziI^mIc8lTj-E^FW_M1UGor6?J@#=>Krb(Z(^OTfNtT>i0lO>#A=Io~Ei5Bf}skrty z&Tr=qjq@QnVq2N@X(wSZt4f^I-3v)Vr8Z!i&4tGCLuR$v|L81PGLcd=G=*ADoAVd9 zrph089E5{cZ^~qO%q^GCNE;eThY&9xW8F{oZ%l=5+U-yzJsBe!kCbUd6p01U;ybUu zk&rd-+}#-6s`qsE{2VGfuFQa;N1OL}y0$e1$1!N5^9JuwH;SR^lal}_Gc1;qLT>dm z2y`2FZXvL#^0;+cT&MCa4 z01)d>Ajb8e6la!xzw0sRPq+vzkvhxySgT|~xNm07AGB;gJE?MYr@J3VL+UxxIMx#u zDJ?A%FN!j)_j)hm%NY~JO%UYY*={eMOkB*k$c(m)-$ixKw+4hODn?$$l_E_i9GRcn zH%09g$MRMWDbw!4t;Y8Tv2BvT$zTS(yip!g%-Nf@rksWiv$r#%GIt-4C4eb^ljD%C zMe^gyCrT?lK9g#Xh=PH?ZaLTFPR4$=DCbISL9W{Wp9tGHJ-KTW9q@Hx;=}v8FgPbd z_+Q`OGCbJ6#08+4!(37`L4;7DgSZgB&UpppgRhH2?kk)MdX+0l)VaZb$@xQWER}A-$^_Go5a&=#C*~eJmue9 zj-7@rKiGBp&v0`aQR?XJ-|M4@A^~Bc^Jqn1(f#y#=UHQD>NU?xxUOw&%>PHur^v;E znr^&Pe{-6=-1K=I5u@vIpC4D=sptLGj`#0A)^NVM9NjNa;&wMgo<3mNc-N^a6ju6_7SYMuw^XVP^rk8?PoW)7p1!zJV2FEHBa)EhfoOF%0){d z>-ZVR_n4sL>uiUo^FSVZ-pct^ISlF-O{TqOjP9o()t^F0w*}jHrC?Lf?9McHc>6@K zQeO6p6XwOU#3V~-5LxYrM`Hg(Y>OqB_?*4GVQqdq&=fg{@tqr4f&Kkr6*VK{v;N2j zq#AL@SXP%i$h{6C& z8Qsc-`o^5Q*{0JyD<1S3qh}Zqlzj+|5+~@|u4VFh!(`6KB-HOkOy&ztnf!f-; zwaD!PZqJwRxQ14?f;`4X(z$Up9Lbu=nPsHJ^r$zk%@|qC$cN6?G+COsCV18cyCdma zFDV?$%eH%?#=5dKa2rzt1yN-ygOzb%XVodb0?3@Hd`|8sT`x%jpLdJJzI0^8*48C* zP;kOiTK4Q&T8j&lKDO7ZNv$Rw7Xn6ZB7X5NreP5mp^?gv#@unFx-a{+Pg0HvvyYOA z-5%%f4ks!L8A-3wyB)K_sp3ca{)JF)QLJhA5{tj0DM@Gm_AQ9s$UeRr*rvsz%ilp+ zcV3kU0#};R(l5ugEzFn=2prI8_E}N>@~)*On4m}qJe%ccJ;Krt%9n5xJ|5(d1B5fW|eT+I4!(W1x?~d(4~r2_U!e{T1R*? z-74s$p#eT2d!;A7(1 z{XXltH_VN64b;B6E56ZxO`U&r{(pW21!DArv7Yd~esNLHA=6;r&po@tsc+Y(?%2<~ z)%hRBCrC5Wj({|(BzN_>A9ueAQ03|&bP@wvi2Rh!X?J{9BEOBH`(HN3@SUyDW;saR z4n-6L^1-KD=H&rYn#`B|a3EVH0zkCud?Pv9yW>c`uVaeaX}m}P7?0_A-Jd?yTZFpp|NhYbpdOl;uqyj`mGVN#>n_KKN@j8fQU%5yKj`w$ zwtntzCJe0q4zx<&cR%$rOh&ul|M_~2w8=|#z$lr0OU1_}__4RIk6v&p`t6pXlo$En z1a@)pSjADc0CW1IPp@C6M{-3X&qLz)wRqqsf6C;PdFg#8(|xR!$yFSC9&a>VU*#|z zSb4OO%*_ozJBce+AW5?dKXMc!-BE#+9^Oo)9B*2E?G04xJmy8ez!z#4*jxVlXQ6_< zU)yOvmqN_82Q$=QRe_TSW=pfh7mSzoYoW$jH_~c0FAcT`q#p49#x|W)4>$f%v!FRL zZkH_3ZsBJ~uyxX`;YI185Gpo`hdHTzp4Q@ye)A)eXY2WY2JQd;Uz!|>B5G_&MA>{) z##VkfxI>&h;!=H-beofYqI3zJ&u`EW{rsIfojjCNlNIIEU!E3u0|7YsMLp%RNSX&7 z^&sHNyOJ$nF{4w;K?V?3A}`Ba?E<7x!)MCwMBN>DI8J%ecF1XA!w+?tJN-z5cSfA_TnLhoC!JFd)B?Hu&= z`^~(CAyC~#-KWZIZ+HQ>wBcJM8~g;0=!UO0g|P^ZZ18ePqfjh2FC+8Tzn&4r6}G-i z#mIq}Q>+J)r{V2SPveY^-E$g;^rSi-?S31#%-tf{^`?y=efNp`%i?Hqp`_QKk%!4* zRa#LE4B=tsd$H@WgTJSL}+Jl35Ze5*Wwcsanek%6(c^(>3lDiXGvUPWb48&??emRr7T zzLIG?PT4_i;2?gTl;(o54j2~8e)V4GeOlW!j`iQi5a4PZj1}SfE_)S%v+bYfx(n44 zGU<)S8Dze7uHkkEaAY;@O{6FLtbI=Z?e|r)yWw`thR@re0}z^3)D+lX`e!lYNMpr2 zG}cLf$M#ez5TyJUeUg4KAKp$rKcm-Ic5hCU z=c_bF=iG_ot26H$yuxAb~jklLt> zbM@lx=Z9qQ#|sZOOD^Nv%74!9j+iB|Vje5Ha&p|sN9|KO3nR*9;Dd>08)ngB^m+*9 zGOKsEQ}GLU4tgeQJEH_rJ!~q-4D?+H08;diSrhOuY5wR5rcyobgvZFZo>^)J1`i3X zaY`-%bRx-rUo3AaTD@XOc6zmAvrU$`S&DhgtFpL_{%6b;6-Ph7^$xH3q?3ijYOZ(i ze;Rwk7-0UQVY&ag+uawt8}2I8wKG-kxw83ka=;){Qex}J4@VrIK`Q7vL>OLod3OC- zv+VH+J)K|mmjL}-Amel!Os9!%?tqX@f}CLOtVy+oq*}%dHd_J>-fu)!`Uz|g&P99idCE7Gxmj5$kMe5s z|G^`n*)-kGeCNard?d+gs1xxMOex~k4-XFg zpzNcGNhjmi7J6w-#jH7~dZkF#*t0~NIwzxF{aWnz{D!^}4GgwKb`9Ov>Gq{WzGk<# zuJm$zo7`h9zy3^j11p2XDfEjS+|ESeJb6hor)>^M9GCRSrP9V%fW{8yT8HzZ)jOVg zh0tdOt;A+Y6ncaOJ)O7-Q#0h3m8z<$ey*bY{wx%-bD0@Q)DY&K}Wj-*QRI;xSVj*aAw7xfB=VH zu}obej{f9@wyT{geGm_5D2C1tfL@+Z*7oo%K<|!kPXOdXTOuWbd8KQ2ALX%ea+u}* z;~|3^kUDB>wwiZ57kM>{XZ?O@d$L+g!@s_L%rhkFh)-pum~%=ql~H>60a=6512UJ% z;^^d%u*4r>AL)3FxK_5z@%W6NrChxJlsT=nwO6PqY=)PfEs97JKAd)SsnqQL@v)__ zfheb?+yp4?a6~>>Co$Rio@;J@UM;!*hjv4tTrs zCSbL6^$VqQy(;cLbFs7`?D(eG{Yh0|eJ^@0OnGd8T}<@tDO#bg3+rlqegypf74T)c zGS`AF9rgZ}y<@gA&YspIWqb>XwLEo1e!8N^*Wjpx)vTQcU)Ix%J1mV?$y_H| zvwzVQM|SsSc#%WMsiD1qhDhv(ntbt#aY7K+a|oIahojwfHxiVN?;*;T*YZW9^(Sny zbIQpsJN-cVmB1V)3c6WWXpb_kvZF1|K~o>r`2~u!Eb- zb03C3VI@y|5%M$!in7;0S8)gYDI{M%aPFU%&|AZklD&m6H>^WbuqWf^=i7Ym_NqTf zU*j&G%3*hoq!i5iKxsTT`$+`f;UWSIb)wjmb$*-v$%;CU1Xym}EQdLLs_MO_8||&% z0Wajo!+x=#vBB%mh7#u#@t#DCR@o)OIQ|-3xtuR|8a@(k0dci^fCSnqDDw2jTfOzW zl1nJHw}S6zfqF)Z`iTU{si-&^W&G2?t46S4(H;^Im4Mjm8*WIVq1cwZ=tg|Y?prV4 z#X8Z39NU6_J=^HZl;sJxd?d4z-@t5W{i9Q908Lc1O1?ARjaBo~d@3pR6mHj(y#ET5| zJP*+xXHvoCP9)N&q@tsA`4u*waW(7Qi~UXKM80O)?!*hZHRTh`8YxAh9a*D-N6NR8 z6?%8IPfwKkyRU8kxMZ7h58I2Zlg&eC7jd4!Cd{lz8%>uArTzv;X1{XRGt?B zTTkU}d;*j6pDiZSiG75=Nb>S6tu57 z?Qe?^;8Ccgykec_+VxI-cG8Uzsl5+UGi8IL4Mf#v+DEbLKMnHk(>Gszm;aVKtCS#t z@8H|KSL?_NHxGQK3&JC^!G3hBiEa816>DSS;%KeukJqVRj;hXusdM;jFXC_(B&4Kd ze$JD!PyhLJ^kNd_1y*r4dMh3xOq}q6H=-m_SjXhbp;-dsTLtG$_ERoxUlo#VT(*Vk zB-lrZ%+P0z>R^Ok_?}aP%Np7P#si6=$)kSN373p9IIm| z1QRpKnu5Nb6_j)xAh}mViQ-ptJ~M7`x&q>hB%6C`iIjCkqHk$0E$Vl}J5oGU$o=nL z;xDkq1RK4^t{-$o##CJTq^GcVEob}v%Pp0fpAF9AIlvJHTPnyIg@OoX9TrYsRq59t z#%t2Ltc}4O2{!t!FJ62)n<9=|PH} zLKoh{U@^+dAXBSRF;nKuB$nRsN1(po|GB!^UV~(C`!7`8J}K*Rf43p@(oKwVH_fkH zl#;rtOc(2+ZG{Gf~Lj3vw^7C(@_b33Se4sfR&2QQzRyPVRsLS0Gv`MYFv-nU)-)d5ftZr3=0b0iA#mQr@hwxBv-9M$hIAtl(Q zA=J8nH97H`tvGspqEEN^IVM&gCVD$(Sm^gQM5fO*<_Fj?X|F%uhUf#dq#}kQ;qI1h zn{H731RGKBp+DCP%^jQhzw7FMD{Z7Tew9UwZm@$YHwgS&mY-jxm&HEp4xLUB>R<4b)2R(`eHbMp3e#Z{xs6mVyLuE5f?k2p~ zTWiVckFr7(_BJbP3T}*--PmaRtNA~<+$m(2sBgQ@8R4RI=Z*|JMCJTMERD@v&f>c; zXWq2MCz-6!ebeJuKHw#qGX1@MVY2Zxqf>!Sw-0k)`w-vH8NUwj%~^9FI!%(U#6Mv zg$S|zY9YJ6xVSiNAl5yW+RWsZSCwYdyTqa!ZlnyclitSn7$aKZ)mM2KS(5_GXs0$j z_)~USh2Lx>)nxfw{2$=_-!~Doyn<0+k}qVG2P+slI7;Fx48tc-TP2R9!0}YEyoT_E z%p?6Js{Oj9p37|A=N+hlfj@Tm{cC>{3p%D zGtJI5m@_JLS^34wcO9g7c*sR=_>0xQCN`k_r9-O3S#xR<)?bB8^A+~Vfr2yWhWMri z;sS=B*yqZ}6>e62$tBk);*YB8XSSVon=zp$80fzz(o0q7P|8|6SGUjnJc!k*3F2AR zx^8~W`9!exJ#6j5`!;e02Rwdda%}3TsE*&-QuAF6ig9c{YyG1f-S{z&eVvTlrm`#I zBH4ihwJ`Jm;8N9rC1|t&d7!!pL}~9;yzxm;mXxP%_e43oh&JFp%O>8+sSI8_ z<2TBIpm-rVjhj1mNn3zbEu=5T=kdZKzGTI)Q&t$H5o8bO zCs;54-n&g!BrBZ8R4+EPzb^Zy!)U@kyrOfhNnH;A0)2INXy+xIKFabi*SE$IeTli$ z9)aTSHMlo-?AlnXgnOxu$$P1g=AZ~vyRUK>TUOTK_O@ZLKs*WVjmx{(8@hAs_qkhk zG?4A4F6wl_5&N6scT!(vwOY28sZ!=z2HnF*opijQDc$o9LzQA~m!44LBKZXNayEiB z(T38%g|ECt2)azP00dedPSCceL8nD%CFpol<+=gw^(M$9lx zFP51RAes`P_W~_^zSC{&lPeguc8y(^T$~!9%xdp#j5>logO7vg03zB)JZPn?|Gb@1 z7T5%o{QO`++AoOz&`U{s?c1+FUJctlYPO&*!A>3=KlYT3TdZ!{1!P#b4vDCTh%X5k7l&?+O4?vOtJw7I@c((D@Fwf>&#Rzu1M0SN!hGAbY&gof!-7*NIdY z4EMoiVH)PJqGbW!R4Oz<>gdiB9k$jpk%v}}tcdNc4|7|G?!?;IGV-G_6|)!qe?*A{ zlsqf|ooB<{f!}ZIjouCL=CMYl4;x78FY4KU{XFXi6s)nQ*`9w0R7k!o!v1y${@Gtq z1w$0hI}p7H1TF=a3U2zOx`V<%!f5-C;834TbRQ-#^5iUljxyYDj4AT05ZQlS}S@afjtN!>wVYM*RS9(2LbA#jt+Y0KkCFHEvqZczRm&J z)6*}RzWS1WIX+kAd&WU?^CQS7|7Z-XmTzv1(3M-Yzp#sYz#gbZ2I2>a4|U7V-S0yL zpY|L41%iCZ0>6IN9o}tW>a1i%!qd9XWd^&ylCZJ|qKv=2)UG*Z{;8+oA+AE6puo%> zPqLl;Hts?=-i06XoBp?N$>wHusALVY1ylDbkQrc0)C5zkeguYVnMRwdMNUa_atuH?+&fj)E~iLAAp$WXgQkHkb2?h=+gtx0}0Nsy?*-s%O|*MvVl0O zZ0;mx%hX^BR_&ZhXYJhp5A9P`nqMbCkKR|k_-K6oJF6dtSmpaxUzI!Zo%F66ajBgw z{y|?#a(m6@GQgxj7(=$QvNBZ3J-7M%i^L9VKS;_-KnlDIxB8Yx=`mV~3zKf?7-$fW z-M+$z8(Lg=BW@PK{YZe@KBVDgM`aD&EZgyM|5Rt$m9}Z~nfA+VmcJ z4xALid{xb*@&$AD3*aHf));|ppXBjJG9`act_~VxD;BkN={JQ~l6g=^Cm#LaRk87% zM4HZhvo9X!$hyd(yz_~;2Dg7>q@MN>oGk=`(0&-RD^|_pQ7jEATMyV{#ZlwD4 zI-1M0Uh6*F1@udt{zAdKxSRxBL@TPcc-&t2JboXa%#w}Xx%C|%LYYyTT}bAo8$Z-W zK?wS*IG^YZZbwd664{?{uYYz*X^OwNB5QV=(h$nQLCUDyyA0Bkl===J)(Y<8fuNjD zF)Ud_eycXqV5e3P?CsiMkW!?`I~@X*;Mki$=9UL@eBT^eMfSM{1$B0} zx971vf8M^+PcNaI2%8M%>{-?P!lYzKNzdg`RI7nhXbR%8I`Noj0dcCSP&_0fC$~!> z%}2#t1tw>|4og<@b3K}$Fzb!y!S*yKwh}XM@p>j}!@|r)&7ktt2e1at?MN1!RjdSp z<=;K&hrJ72laM;6rwSO4dcqBwX8}{VmC_^;b6Eg0XX&Z_X0iqz_F zCh8F~Z3^rCcZ8C)wP-hYeri2Rrxw&1aF{wxkWHSrdk(pHE5Q*aE21r~ z6Y1mtnQEId>>Py3aoxhY9>Ou7I;Cl2=#hSN^xHV_>3_?*ip55Mow?FX&VRYcv^}J$ z2l))otRj}uWeqLN?q+niz+)Hnm=or#iu^=fsT7J`LtQHyyjym)7U^v23ENpFA%aY4 z9$+WlonbJgaboTh&dIT9Ewi3AY8hCsDrW1YQBR}*}8o5tp*4D51eePs3 zI~f?)2XGtii|=D|kVhoTl{R{Hp85jWQwitnwXyz1a#M~(esXM@H{^lT!EfR^S8tpB zjw1^}yrX*Vv;m>xrzCyM8Hcwn+NL(v(GkxxynE21pgy23mq0V+#8BXx?HrNpcMyDM z!0z5&N!L3K&5o-oM_y#wA6uSyub;zq`03cjpD27JeB3$ujpgTk}!okj6!6Vsr1R78B{(|Gu7%*QY@d`)YynpL; zBd`7XNI8Y*S<83W?t>qp$SEmR#ElsNyw*93FQQcUBqF902+P{cXjc;ncXr}hSKI0& zCBEMMR;)c>G5P5(in=+`ys*FNp_1b-(=MC0>EB7}&ohZ5u=X^!JKOMYv8(DmOds?7 zXq%cQCv#ttM>RMXk6`T6OUznMbbQU-EtA0y?x*?!fnO&zkQ;wVg#b(TpW?lfA|vqJ z@0|8)eSB+YM|F~WV*e)mqcOT|T~wrSBg|b~T{$qTEAJ50X-aW-U%wv2t001&gD{1G5SpKt2d2Xa1{_@*rTsE-nl-rk$` z5?F|lH`mHbi|U2!m{d@EP$&uqG&_@$4zkFpsky+n+#>BQR|d0ZXLLU_zEg4pz8avo zs#5cqoOvL(`19|?EmWLhMFOwev?u=+iNmp{EeYY1dsHZpX@8u$={%($9?48a$O ziK%AcyV)0);7CR2{=T?6sl7gD9QOrZb#Zn3V;i9aoT`TpFQ+q+D+_69YO+${9c{?H zX51#0Tz|T>t1vEhs}#nSG4G7R?BGvw3S+;Oaf#_bG|XIJKFC4J{dX2Xg&@{41NM0p zmMLw_nexkl4O7iwce?|a{p&OmlL)T(f5Y5TV7fTe9O_$@8X2=L4Q2 z5qK<-7=Qf|#WIoAMcK476+rxt*1F>6JhpvFW<(u$;?@RA> z1|Dq`NGml685;Q)P>qsIPN_9ql1jAKCL3rD=i=smSLl2{ASb(St|9l^z0nlu4O%hB z=JyW^sqV|DMe{a(Hsr!KXjQVKL4UiENs%3fPtT|;92bKrEK{bP9&6p^ZDjw2i&`+c z>}aB1@LE^IT4+`R2=N4fJ)~O&i*hnMwlrXj~ffLo@yCto6(7K z+^lob){dSfAkrpRk(e#7&QdEo83+M|ruM}nkY|%itOMl*de6fJ1IQN+f2UEFEsAhI z*5+B0$D8mp#ROK*F*>QrKG$%t_UlJI?Xt%ZaXZQ<33GJNlQ~F zlY65Q%H`Nae2$ZYqqQT@WwztR)rBYe@R5!-FoM2hwK><*pr5L9Sov<5Dp6Ec4%0i>uIa5yRTVEZ)oi2(gxrMKNIso8@Ou4^VBcUU- zpuMX1cp2^LE$I1I`C)PJlEz`bECRX&N9@6n%4O`XD`)8fk?BCHcK3?2O27jUwyl8y zo<8yKJCdfMGWN5lnA9KxEsf+k%Q2&=A4FoT6tanDY~}kA52Mr*I`nt3m*Z~W;(B|nSD#2)52B zZ0|>6J^K(hU|W23B@6BqzE3;qOFGP$`?@ZCysOA*`2j?(Q@}?iw$vexTbl%V@n9XR zGKtIX8+R(}Oipp25PT^lzCm?*Q;MB)PQR+eeQk2J;%w&p&%%uu~XKtk2*VJgY# zTeWuynooO|p?1|FV*@lC`=fT5(c;f^gMy|xuwa8~%F5JWnM}K{UVl$d&(hLR;DAt_ zwoUt`H$CyQ_FP2_03#`aDW^1P=&iG}^Onubn2akO)2Y1o-b$1KA|-5E9@HmGGqxv| zHpq}uF;}f^AIA9O9rA{Q69!WQ2{X}Q4S_Y{LxllZI%2U;e<+JzY6GqJR_$(G_Bzst zcI>=OI55=1*eSDL@{)(`d*nGl=+$L@1we!@Zh`Rw8qk!PSMp99!qaGtyekc zF={9zh7eR@!2$t&ffUeG7-_CY4kLiNZON=kMI~Qex^n&c^`yxz2v)lVjJoSh^KzZL z2TIuAc0dgIE=!{i25@oVzN{6C98thStO|~q3ibjvm-u^@WG>^C=7B! zhvtiI?THeUUlvra1fJgx0KdF15F( zhC#^{HXJ*$HiQ`@8^T>BH8K9x_WSg4#$i*h`0vURddhkFM6E02sXT*Jsf{ZYL~&b2pn}28lS{%3RS>(@i$F@b-#8=VyExlBTynS7 zi_PF>-Yw~Y)oM$Vfe*2^u-SBMjt0vB81cfez>J|EKn=~S&opKB2R81=6A?_mjwq*hVDxCa!9^ zem_McVeJ@yYwMk2OZ$?}JO^s~>3eD=#%mOXuVpe=v{hTs9?A|i*xT~8qn_~%eu#Ug zCZaEeW8{dvy87#s^mI5JtBk|A5x1fYKw4?p+Sx5xHk|C-$Vd(hjg2L5$Pp6)tv_r4 zT|zp%=aE@wJ-D{J?U-0toAgNa0EwLyvXe^|qb>{C(8Rne9Ig>t@bUo2Yajo-DlJ&F z$Rozc%KEy6wL*@AgX1m51hkvrI+E3}p03YbXuyhc51^kdIyCs9>dsc}0Xi>znch4A>aQ3T6Ts_S-TA5$*e28cFa&QrMg~TyfNlhV5DMIK1X{eY~Ih*6&ehD`;Hnk z(d7lv(bC=qO(a!q7^e>(kM`uTa@qCyQJb+a+wC?DX?=xa!M)nHwGCNH?s^lm6G5jE zK^}EpBGIZgenYa90;df9GHQdsVf*X_(3QRVAr6jieopBG#`+^2P8?|E1GHTuO77g= zIX%kY3MNuH8vwJ~2xjhBH1wTQq3o~y9tsv(XRR-TN}BYYJ0X0V03hO(dbVK85kR%J z?=olOq7qAQjsz2-&W6j8{Gap^Dow_~NO>wy!SzZvd%poBSZ-8slyK1Z6ZM^UPXK^2 z;#VfYL2jNCMmM166xZ|;my(hV7@vH*8HLHWTI5ji(OVwspqIjzUxWH{wca6F)@)eee^LHnwOJAk4q9mqTqsmqKVL@2EEDzZ5VlO zyS6>?s6=HmrM7x%RJ{X)ixszPuW_PVRg{g zy4jnuhNpo}e^Zf!%UC?zfe-o&A^ zl7v#0Q%U};9G1D7c^{1NJ6|HUOHe)pU7I=pZ+*TJt{>#ja>RnEG zGxDHWZ5wBY;G}Z;nxwob7&m>d*foxQA|oQ>7@z2{!-+?uRqlJ4#(n$Pwa}QcWebpk zqU7V|C+nI9_^`i2l0fApl8vnI;ppeKxy!8LwDCQ?$%ZZ-0HqQ~a|L*( z?>ExLan{l|z!8ATB~2iQjKilv*4>H~m{fOu`8F7UnQFT+Mi56U?!Ps7-Qjt~-tt!8 zH~w~q*a5KpYgl7uv)O@r{eu_)lHMM%sg#f5v7LB9$Ir8*7`H5z9@RPS1!i`>?8j+< zlC<1w%(li=kO1eW%pagQv>wb)H9H(4YS82&d$HA}*R@D3e6Z>>3;-T|58UP)xV>(z z1ew0?G(67|cvNb!;*H754#Q24bBu;tJb3m3v{>kKHP&^-+X+M?Oo0UmoLWj7_a*lA z%Vs;4!SJ{hqRz0q{6t1Bi8y(Wmzh9&PcZNN>JRE#qIhTZ%9<2q4}aNIR_O=(-9}7V z@gE#subKHelcF+X72vKlBn26TRU4S^a+!Xy^`M#Lb>p3jwudY@J3G&3h5oMb<(b&` z!$^|+U++~`4HQ;wjlLr!Ab5^?Wp?Az95dL7AgS&c&rNCoZEg6EpAUF zJ+$+}IpBRdmBnv~`f$BA76e`o~*Byz^x}m>11qd99io-^w@$HWeK!h#tox}@@|W*G=bR11gN5VNlp%4 zZ~fQ>HT@1S^OKTtZ4hkUElVIZeA8pzYy39eg_4W+Lztl-NF2nh_m%S~6$6(7A! zOG|481p|51FaX;&Y!B3`!KA9foA^>LFO{PLDOyY^jM$< zZK~`D`!D`35+H;pLPS>SMQr-WxaW8bx}zSngDIZZW~gXD5V>+{SxSbf6>o07Dr+n> z@0kLEV>OJ4DzbpAf?O+a`W%f)DYf=ZS?JrWA+GaDNk&smGp4tL^xfM~(uHjj4%oNX z)5>@?PW*vY64=xAnI`<8VXO1Xu>xOjBQm&sTkg8^cYo->VBC)>=l%Iz06Fw!Aqxoj zB$}CeAZW0Pu6MW{<PE zUsmSGZ;IC9pg_33U2|525MD7@3o`GUY%G0p40(p}@Hzr)uXnjsVhZG(?GKMumu9|U zP4zLo=EYt=5hPqgL;Br9HEM}OuIS#?D!=OqkI z%B>H(b_)Zz_fPH3X(B}=91$P@+rlkNq9#*QQz6J>RjmjlKz~mdNLvqb;SmrRA20XS zsA->EFq$z!E_PZcnH_MegE`w-J>icJ%lkMEU${;mlCU+$R}PC*Tfd|L+=9jnTd6)0 zk9V8G-d!4?Z2fW`hm7II@X_;6pK&mFF;QPaC)1$fGdGc7|6IAp^Z7J}IW=-rN&&Ty z#|3s8@o?*V9&SE(V2cJk_N$2`4Apz{Og*2V3Lr47o92ObC#-iMZn*Foi^FnW&or|o z_}09wvPy#8@>7l516DUp+V6AH+_bE4J8rw5U*D``Q@`Usm0_e{KEM3vCd4N&S_0|3 z%RBCypP97^Y%|N=DlA-gJowD96)PM4o>&*-l=;tc)T(YG)c*)#=N8aPvY3d`q4V_u zGH!+p6<8`e>alM*SYn+J3tS6swHe+MbK-?YKse=ZI+tzn{e>Y0#c+svr}zZOJb3wS zPc=9f&{)p!I8T12TT1k^8gcFG^IAV)vPz}I(o*T_VsHBDqY3v4C$VQi9HIp7dsy$c z^G?Ht6Jz!B$KwL8>MI>OYol(yvoXSGPo6$)0*K%9IAV7%oDddZ32@dCJm_cT7?Rr$ z;oV^mOTD*VPq6F$H7MU@8jo-5Nsr0Y9Xja_{lm)V5jiJ8)nmrsus8;P;=_mXGvBKo zj#Spgj;84eFp+(xv0XEtc|1hE`48bgapwQ}0jkI8-VEf)WR5#sp}dXF%HMZgzXeQt zAKYAPi=d32ejk!7uk`u(od&+Qr(8XbgxDm*B!~NXV$37-gEwMDgA}=YQh30`J&W>m zO;^ev@!(&{z#mL=%F=)qTxnj4s4Dc*8QcTIWyJdC@`pB&2L~e&`BU2{_2wW`AQ8cL zbBt4z5NtKq=XabRihNcG5HkVSOCiKHi}}!T;Y)PS3+rmXP$Z|8q7aajq;z`Q;=yw& z_kY)Pe?6z!LxcpZv`4iUq{(T^U&5vvd)~S!eu!c$sEITO}0CFI$wbE-HfUP z!m~{HF9(TMuAT8=AlKBZXlO27r;V2IVSF83>xYtWy`rzRRb$YaG3?><43 zb1cGCdM~&zXl~Z*nc|5cpc2rtGQPsfl*zZLDw@4N&)=09#Yo(C>rD@*Ih6#wI3ygW z;Dl0t(fS{kAQFbLBhUV{AX+H6=W|H*y^niE`E@SA={TF%uPz76XZDkI^{@3*4t^_C zzk9bkN3XEZ|7B=#$aF&Bl4HN4_ySLRTS4H-Wn58G)@62f<_nF8VR6m+oEIOVQ=R*s zs^<2)QUo8cD&sMU%a;)2HyO$=fWrFofzBbeT^Wx6id0&v7-M9##yTNOQQwkiW*p;Y z8V-^Z;mRvL5&?&>6LIQ9LyUI6mA>40&TrKHymU)N@sy^pZj@|wQkarCxwEA2wVU8h-g&c z=+`OG(lR(La)rMCEeQBC<32(|7Es^$m+Z*D{t51tNbdy1|G|Sl{<2oa%MgR^g8KhZ z4nLXLn_-9K=j;FR1+mCzX^%b_68=)lFcI9y!=?*mTWfh>#5@khu3P{7r%wExYzXv9 znwnHVyR<1seD&>}zV;)l@Rtz(=?s->hVXByQ9PThJyjrnn6$akB=_|?3El4>{|7$w z62`=p``Y0Ykq3~M|G++He&N*se5c29_2_^2xZgiwyaT*@XMM!v;s4p&^Ul=~Tn@8A z{NLvQ-?LKy-hJfeL_+p|^!CThBJ58ajAPF{%dM!%i=QHBV}Kae<$ zioclaoa^*iJk9z*_k$)4d%ue2I_$drTH4Hv$u;Ig8^US{zvw96!x3U$93R6tO&WVq zLiBF95%J~>y~h=(glsZqZ~gi}k=iS`gcOsS;RiRJyM@w{e`HcXlsGjrGw2iaV60C6 zJ`_!_qQEX!?KQDD32V>f;K|*&&#&+&kvR~f;@=xb%12zNBLS4NF(5$!lv6T5IE+b5 z1>>DS8b`)fP836Bj*mU?g1{eYA#>@3 z5ala#VG0vVoP(JXQAH+2S)YqI37GzcgQIn*ni)8s+TGs%woE*5@-8!dgoZKXnYqO1 z5-DBCVKd`L@Mef^k7hbUvfpk?pTV%ZMCx_AwNr}yc5`4i10<#}T0;7aM!Bzo6slp4 zUjESEw$Y2QOK?uung2nIYju#x2N}p2MygT`Hjqx9XBT;~l|Z3Kl-t0%lmn2fvk0lRt`V3UZi$E$pM(%f=#!06y(08~H#1=2oWFLJzWib&dc9i}s-G```;dEdd_0V}*#AL~H2nj^=jEyB6 zi&^ZldZ@Sl*JnQOx4dTaXg8+80zGZDprYjwM4}Fq&@lC=Fw|?<>j%c|5O*Ai0M#s9 z`r-ltM5upZJ2NyG!V58ZP78IGknZm8XJ(a)`1EYw@HBDQKzNpqr*MA=A~_+xdbE=)sr*iTP{fM zb{ILydg#a72Q6K78aaMd_>jc!0-yqJC!*Z?#mqNG9Dr>V!jx7)w?5XYt#)wco?HId*3X%B=AhQ^K_dkR5Q(Hdni#Q`+ z+8jMGsmYm7Q^OXFV_G@&BPbU!#H`$}F9;1SY^Kt6ncnQ@QlvxXK}&{IDkI?Aaj8Fv zlt|)jSk19r{->?w`+=U@ZRzY){yxwgkEQc#Cs)cb_kQf=l>|(1z;oH2Usv|qD=%TRVaE=9yvtof7fkl>w|74VUqXV2w~A*=@#2+3*$CvcTDzX*5+sK5%~=U53B#jH><^F;B}I<0R5{aTGSr z%>(H&>t;{)QKt+LtUQK zoUkwj3btg)gsJ4@Bsg@p$LNc29qKV1k|$jLO6|+coV6P8!JaQG$o%v9Q7Vt zKh}lYcE3pT+bfYoG`Y$Lq2h&BSI1h9M0m982*^|@BgTl1sOWS=34NXgULZiNhoYj# zskPn-)jV}p@Di*1eW&l!15GcC+KWrHP84H?wwIybTT9-5$yA(*nvzegGk#8}zWZK_ zlYu8SsOgz(u6;J`x4pNGE z=P77ykWe+oaHURoNsx$M;E_mw&aPOfdIXyePj(6X5{KI>!Y_`+7sxq2j5eH>6PAF; zXT;$bow=vKl-|j|J;3GNEwnS93TcS(@>>BYV_Pe5HU^n@t!vZiD>AC~FRxu7Wfj8_ zxDB!p)Y|jG5UxQtfXi595VFKu9QE7cbb<(SaXVZM4};lV7jx=kKTMwf7n4 zRan-XpyXu-@1MDe2W&vKEoEn%CfOm;`Wq#bd_*c5&AKv$CCQy*v-FBuF;X-c7#7rJ zWNh-LjPExkp^Qa+E{p0^&rS&-)a)9<$U?&jdL;CTA5orBNmtkS<||gN7EtY8GNe2t zp_xsWu|^@b!)q+@ro+e@N5@K}A2FD>|Kw0)v8_qM{wNaU`Z!>gIZ}hCy*jGE?Es)L zLB9^>6@>Z8e%82~d3&~Rph4=^_Ag1U)S@cZh@G|F(;{vLAnDi4+ibP&r@k?`?`}2t zYQB53iiRe?!Z>2FeP}vA0Sh@=M+Aw99_?{K?f6%z#^AeB(w-LlsUVl;Y^N~?1ux@v zkZ$^3rr&eIQvd3zBoKCv`^AoEg~r91=c`REwigbpZ%^$lmd=(sVJ;SM$P=g;$8~n# zF=0xm(eUzWLg+kwU&Xe3pFELn@mnN}uKU2;@7VJCjc>2XdBk`OaS5kjGmGeyj4g^* zdl}P;sv^s!^piVLWeE% zef(js_X7R$ZIE(qL89BUI45zdNagA^EtQ_u$Cl-b&0ox;N$<&LvVVU|CgDIA$E9zzS#xEjGUqy&)5W{m8)cKFnNSlBhdxZL9o{q{Lkmv^29LR}q{)iWcw` zk)YnMSNCHx(BeBSoj`PjI|r{3go`_Gy+AgHvo`DXF>VtQeFLILQ`2$7f%| z>&k!ZVsq!N1dsn(=B>qtMC$K90}fv*ey2V6CDY#mI)Oi^ORB|v@c3ayGm?SEc6c($ zb=Iu@?I>oC5!yx*Me*BJ9912{LCBNcia*`sVD{~WLR^KKT9N2hu3nLxX{C@Jhf`|Q z7MH;1Pw!}D#E?0SQ&!#=Xr<`X5eAC*R}ROL?qBXIZ+kPy%!tV1?c-x1n1Jzv6pu2% zF?^%npABnjh#H&ah~@dnE_Z3WXdkcsXyy26$D&mX7rv;QPFUVj#E5h=gAe@mZ=6r9IuOK zbp4fr;y= z|B7PCh`*eCyEuTeJ>C#e(6sXCWz*62=Z2NY@8+-l97DPSMn^!i*<5#ax)!4WGusU~ zjgLe$q1hW4al4lAmS&IdY-qk!c*`8iE$vt5HB^zcdA*sr#;FA>=Qv@d_JLTZ%q581 zI23NB&)A~^;;?gpZ-3nVFCFmfRp)y|GLF}6{EatJ9 z)&Z{n8>&yTc0>i`yN)&Z0mQx`qnAIiN_xSil1sx zZAqmWjjAjo28;s5Ry_XeW#yOqgwh0kBvM8EBeCO{G)$m+CE)!8Vk%DH)8B%8`ahge zqz7|qrK!M&mh17oeR}td0IyrQe0$O8bu&dzZ3GvS>r>PB?6O0S7Ow9W z+XXM-h7}0N+RJEH4Nq?X^QoJ!#j8k$M>^D;%1U;6x*b2jsMWUVvvhwtE|fcYZ`YvC zWOyrmfj$8g35(mTT=9pKKG(g(Cmf_3l8`LRUQiV1Aot9R+M0|TNS6hn`0hb+2~)o6 zFqI`-!sSpY8SX@&2mG0D-2=#Cw*o=g&wXJj@-dD%;fD{Ik{rn9df4ii!+p~RnXsdHJ`F|G&~@ZQhZKw1 zhnUpB_w7^V;g07Ev{W$)A7wWFh=H-FfZvEj)s6h;TOVZv)fi)wIBDA^!8ck6!{I;{ zTYTsk(-vm?(rYRnEWS&v9ltBpICVbTTv*>#Qk{8U{x!?dXY7niygRGf#S;d7TpynR zU)PQzLgA{4kbI-#x_N&#mH*4aB}WOY(#Ej@N#(GCFZ=euvHDV&xx}BZX`dO%FODSe z!w~mMP&MA}nHEyi(9@nIKTE|`7ZCHU?LPRJ?Wr_R0v}k|mp%W>7yw6s6V|F{?zs9y zVIa@h=yo{P#xj{fh(nU#zW6?N+}o@c*dBW8k@2(H33?EGr^ecBAve$_sG+@j{rd8S zocC$y*P~dnsQkGsG8yR`%QAs!&KU%X^8-fVY-gsw9rtPX!+{c1XNM{TocU0N;5!;f z!bQHs|LG(A)8kyd3=7Cy5ci?AFUM{Z$rE7uXgRZiby#iFuS!v_+2b&KAEdcU90mt@ z5u@Yo)i3nW+_+y-R{K?ooCzOL8H^gjd5JgH)$Q^NfD2QKs%5-%JJ)84MNBN!4zM^9 zi}ur}1CNCWPfl=P0KD(xnueFS|2)X4%+x9(W2|kfBFn!zuV8lWb9I&LNT32G!FG0J zPO`PGxBbdsgXK*}o@_7FbHGwJ3wzukUhd5EN6(NZS2Z8?Pg{Czpl7)<8tOyB%xF>o zlJd%vnsBFCGaxUq9G|YcOM@N5-0`v~Cs%h2a&jL$muas4(H?>vXiP&xDe)Kk> zJa;&4$<+(k*p5*-$w7qT7fDE#idhA!#K!%D#zNSH-3OLior(2m(i0rsRJGlnM4Ro6 z*iyY>4$t1qHf;_DPCd6iJG0<0vKFs5(?(C;WxVeE`t{5c9XGB6RZ#j5lS=-m^gI}q zw6!0|57@9zK2aD9iHKQpEL_SGL7$o9x7R~rq_3uV?>0vrmioaaK?;Oc2fJ^{u7PTv zHP$B_7adjA$Lx*w(zz|AB_shCRoNdOkxG5Zv(qRUWZrNJKhYKAG0+*lxhD_&@eD$& z!7&9D_nHIOTJcE*?>LTo!S6V%3|3IDizjVH8IC@4(g}$Zq6@AP=s{M^(moRIadM~6 zMIb~%#a=JV+p+NF>~<=}Dr{_N%d}|(cAC2;ce>}9J)1!ZhqnFPkP1MFdGOQMYoh}i zVSUgJ%EJ@p$xlrSg7fG1-`Nmsab3}CPRF#7 zBk3-eub5Z-Mh~eYkuWr%L$9^!o@DiZoMq*rG|2FPy!LH5Uj^F@n{*e&hFSD8+RNjb zlCttc`9W}BpHP_|C%vZ{TS9|3*oRgh;xD%1SMAqVWH%@r6Ym%`-C~mc%$ZNeHgLfa z^gd{6G1AItK7MkHGa4kBL1Nm?n1(J>g2=>GdP~@0KG@rBdkm@lqct(fG*4#|=u>O& z(>$%G7=s)HDM{qFGm2-G2R4|{imNw){|xH4S}$Jif~K2(($h_uiiuN?Cx!e%{Fln> zWz9Al^F1}xkCFtvsMCyQKyHv~71{Cr5(fg|I}PURJ}bSc73dky70F2-U-ICc7#>~} zE6~z}5mj(F_VW$}2jdDfelX6<0O9@TXr3otF{Ha0fV&as-#fIR?i$w1nogArA7(Tl zUb&o5_aBu-91efi|LpFc~a z&4X~qWWQhj#C8IQ}v2pAis8edVk%HAFh)GI*S2oT^kkpfvptA#v#STu5b**M0FC5rndnUSo!VC~|^ZT%3I znEn=Vi=iNp)aQ_8P5And_}x4XOUX47H|{>^V}CCOQ=OfMOi^i zw)FxS5vzrKd{{XeInUZ9fy8@l&8chqA1If+{Eoso$a+)V+rMKWw5cb_P9%QR<=4Gu zk|kYGojZ)84e}6rW(*7r#U7^0Z4+8|VyK~x6$ zH?PX23#5Ni`64JFg<+Vs&^i7hU|}Gd$`6cBSlJ8}``bq4h@~%@btOH8moJJZzCpgn zZ~vOi%2QLw{TOT4Wo!T2*WEBsFT-RzjES|acgAuc8Tu}Y(5BS=PSLYKb=kwTUSZ+B ziK|57=l-=sOr}-p8+7oT=rSffd#$Q_FRBzZKplc;Ci7M~FN*eR+{~@dz!ljk1Ky{H zhYty;V*I=JSt|;w99~=-fAPvEiVhpNVVZU3**Zb`gi&;OZM_tF<(u~W&5VfhOplhI zFfB0?@^7}DU#Zp&Nu5ANxBP66CGUEp^7sJ-zXPEjDh}CP_uvO_H-Y!JIJF~NiK0h7 zXmgdy$=(r#J(SbCPZSZB9njvdD#>~h2m_jA>4sYZPOO~)xrxo}1)Khu`+*{$A7*;Yq2Eb&I%zD(@zGP2M2Vw#!HNei(WVt&So0Te z06)1f&zk|nqqD5EF%lAqPh}-eiglEDz`*)i5$XLgrJSJ+mfGTR%KI9XV`IhBH;zqi zJ(-ORbnJYBs<<(CqZSx@>Tyd%K&=q66*qCAsLgV>_388Zb`3<%6CrSu51%Zirg}QQ z7cX~HQsO-_j?EaJzUj;dWpg0ksIwOMb`*|%2bH>7xw2~yz)6uy&{%P@X1hNc7rysl z&JqDIjy$XmRSuH@5%I6`CfQyqsH;Pr+Q_c~BfuPLNT8y^zl9;=HxMg0Id$_D9u5WT zPJg-Q+CXt3{(Zip{p^?2=5lfouwJjdV)m^cGhv7H-Mg?#;mOZy=E6Dg*P_i+i)rWt zWIb(D?SeD+&~bt?1PO{SiqtBicsFNFq#GG&{`;xHlXzTv z?`-4;tmYsc$ih@m=skXHF$Wj^YPk9&+&M{2vlJ4y+5r^D)Rkb8T*ZhK?v^&Ca)%Wf ze9$TAad^I;x3KpW_uAwz+Z5YzX+eFtsz@z90#>&Tv2lpn=j~!UIPm`T($KqJk_5ga z%GQ3$r(qeoxsXfBM#`HckuJm6)2wVWA2oBMsi+JM>J5e3CfDi&*rFw^q7EV(0xte{ zR|{9M$lbdhyRRs~_)WUP;2mY<#jSh_0jB~)I`TUF7u^LK1|^$cUVw%dH^FtaUB`B* zEnK$pMQy&h-tSwE)8PPhpt*P7x{w3);p=xfReFKs?#k1Q53hEPBl1&(DO{Xj?Yw6# zBNH%SG4>{mLbGcyn*Y;9XswRy#9=0|mcLi3R3?9)bfY z$hj?xczM=IIBPXlwO~-7(fR4wrc8dDOW6Cj-2=^BcfIMdfKiKMz<3*a&odF7S$`v} z(#RwelzbEJeFd{HjZlhATDnDye)RF2j8@^eK!Gp^hxW4PhQxaR;T6I`mtNE!*=p!mO-nCbs7sCz z$(LN;gaZ2{uM4b5#dcu)*(BB3q=pbfVu7MX8ah^VXGcJPdfF2w4M-mWznt3Z14g{r zMZAd|#(kig@H%U+VPyc{=)+^+uEUX4&BjBL-Hw;;uX#ue*)>!fDe&sC?zaR4$_rgw zC50t@K$I!*W(3I!%|RA>gSn$_()~f;mkRhiwd3)3RL8v|Z?ddG*&?-4rL4lu=hmA? zt0EDk$#J#sxdaNbn=~LgWx1C9nNF;;S`c?t0%&V;0kaPm#lJ>+Rw!L1(5Z&sVW4SeEv z&JWqVho{?8&ym{8YX-4~?ck0n@Dp$Q`L{MnQMl#xXkFu>=1H15@8_*KyIxzS$m2k!&#M^2GHu>eph z6hO2FI-vchO2g`T%qqB^okNs|f`T3`JDM{Tti1JGs=*|?hA1s8fv?`I`vAxf5!38}* zz^_Y^b(>JK>+7B3#=-+*@lPvPH^ewVrqgsDCvMaa{JH$q_m*~VD>D|#>W;PchCYPDox!P%F0LJN19yumY}`Z9ljw`n@N6`=TAok{ z-o1ILo0$N#m97fGLPPIGeIMey?C`K+Q}QT%rRK_vLTPvMdd5pHVe}L&5=PQ|C{pY) z0Wnl_)QqlNaoV#_sVvB(pnKC#(mPe_%vGNe77^ePZe;fd-LGa`=+Aj%u8c%Eufoyo z^hFeZs9$|ysp{?R1P}FobS5DCB9Qi0QyTiQ7zOws^+0uqRm1#6QS|KQnmMj|r3J~Yk0qw3+MGQu6fawMUit;xLWYCEFt?-vN2 z$A=DN1jok^@7}#=yH|?Y2K9$C9q;I=vm7U66}|?^9Il|Mt-Zgc>|) zMY!pa`2LI{j;7+5q1f)d;5=pgVYSvj-=`M%m0%)+;qhLvCk^nYj;~iP`X{;z zAvQDeuRzMxEqh^u_$T|X%EHEokIWDmK&pFQu==AE1ycjLXE}O2T9BFEj3e(w;27yp zfjafG%O0<^>{H14bBny0nmN@Uj>Kv&jdKfp9{-x0B0F%ouYZ#2^gfQLagEz*G`_L< z+$H~acJ}hhMFXt@+A1!Uw8sVB4D&i0mfta1cP-pwzJjh+G(*_S4GLPFr>GiPT1CCt zpFp+smFKWq4cut)v}d*!j*NkcX{;mh-Vo@?pbDw~>=*_f>e05{RI*mCOdc5N7kdQ~ zIMacyGE$?suM0s75w%4BN4f@8Dt-#-yQ0-?wsYafuO<~QTUZwWMGKz5s|E_ubDdt> z{99k^eEdJYZY#5;F+cu|`?3`t?N82mBRR z4_F$@4f1x|-F62Vz&d%Wbq6n|8*jZ(Rc5zev+i%NgpImS(xjNQNLJAXxJGz+M9evn zLRAa2Yk=($=d3u@{~?FK8Ogm^x@9{$zZo4PDuruy4Hg*~HyuJ}V4c!ml|DG9I7;^} zf^91vy9*yQ3K76`XoX4H5$**bN`n$8XluA4G+@LJbuj%SRPv$(yo3Ecxr?1Z5gA1K zRQp>?;fMQMQHrkaEBY3$39nu+Z>R9hfh;yO1=7W*Y9$DpDq^Cs&~Ta5tM%6&Eh+i5 z@oa^Yt(?Ig1%^t{xxxu`zTiHGRNjQ%{nmeeC(!a&MTQ>cmJ#X?giK6O55`5YYDJOs zPi&MieO$~HOue;zxoWyRW_i8b2Kk<^sV9dR;^x;$&|6LPyd?Je)$tN&_{Vr?sN%rU z?ZO2>y9@%1s2c#p0Kfeh=$%cTY1t3M{@p!@cO#-3zvFp1nEc1rz( zQOtYE?9SzfUvn-4|M-`=sI`OC(2YqjwDs4&x@jAJJ#bF4ZvG79O^0^9KT>f_j{j%3 z3Ia=KGqTP`zBh{tN+FWZnO5uCxHIrRBWjtB29JuQ`2H|}@bd|NKFdxdXfv{3SB|RI zRR13>W`7;@NWWgqQv)6MA05fRf5QLEA5QhjTB^SG*OP;= zD7&_+As_^F1K^F82f!zPD5787@beX-9^f>QV|2rVU)O{{huia};J2}XpKm|C2Q_G{ zpmMc8mLDg`Txedd;l-~%{*PDVK|qFfCiP6@{^L!*avw_f0mJ@Z^Wj&HcHqj zjv6L&tgr?V>yCoUr>zil{M^iPYN@lAaE55GbA(ip)FRe`AE7Me>6_4WfA(#rWHCRw zc!}@KXaD*Zw4EZaBeGeLWsbUDz1mM6)X>aD`G3nW3mAP79@DEoqo&Rh=*&YvM7s=4 zsQlkMKU8}0`CO+AC|pQ`vFj0XlM@`Xe|sDeG~}~ShO3DPrs3cIr?{jU-3RkMpMba+ z`tkSJQ2+Z)|HWCSAOAQRfLrXLsIa&B-WXNe`;GU`9^jY6fLCp_K}7Zd?DcMPS3oHO~J(16}`5$pDy~1CZkp0E@E)Y{;GhD9@WSZ#Z~<+Los@G6(hPyoLsOzAv%!Fp?Pa1B7+$lSW+4UP5# zSTWL>TZQ1pHbHg3y#hj50w_e&mFw+cAF%}%gSUv7HK_n_<&AxfTzwV*CnS`DE%1X| zYippI!^(ck?7y-vueb>1HLyKNP3Uk#rOROk4=mBU%&ZW}E}B#Jjr7IKm-a%fE`*2x zJvQ|FWP?9yh8X}$Dd-TY@EZcp2P?-gd;J{X=g2rZI%Y@Dj#gEmlVW0EWH1viK&jQY#yrc=pr(hIq|7|Kqmc$e6K=NS9g)Rz@&x9HD*jL=0mP}^yG?{ z0RqRRlk2-GVv6B$c7h0v9k%o7&9AvwL|H=sh0Ec*X6y{`QgkoxZO?Xz1@h0+zwq}D z11Ksow{QC~>y`KRcAb9>q3Ah3BDr|+E2kk_)kdhU(}+V_&E76ReYxQ>27%KwPy|2^UbsXB@DnAI4Z<)XFB$mT1-{u@bQ?~&{tT_0>mU?@|-MT(WY~q}&03v<)1O z#zkBcj3kg43zy&JRzKhnFjIQUhN8k-^E3xXHYRCygI@S>BMkJl+y+>r(G#J%NH8yA z-sx8=5Xj8Nm#+Yyib#*k6?U|&e4v=fcZ9u@Kz#zV=w?7go#arZ&p3HZvvbsO?&@#C z@KawVY9q)S9VZ+gG~{rawqDXIGz)=?U}X0Kgp#fj7ih6g*|;}m3V>_cD$4>=Zk?J)fif=#m;G#~sL=i* z=LdyzPnXAK=c$l2uofVaG5XCoSThg`V2t!3LCQCIdFlEs*78|uzOPU4 zvCT~ZN=%aI)#ze_{V95|k2(Uth_&XK3t3lwmn7k{O=+q2_D0-Y{fJ|-JY4q8#oXK+ zG}~Q+WM`6Zf3aY?!}F+MaK<*Nryg2#0GPq|)2C030Kd}!tW(SGxf22)&UW|5j#&Zl zUP#TsG#l-LuX^X4D1<)3*$)`LY4z(zi3XRYqJijz3NGcgXA1D7NxkBSQCXUKw}5gp z9_rp=t=dqApk~${I=Q#BwS~K`R1F~a&VGOL1i8!0w?{!ByV-Hxj*5$`S1xP~TKNDV zzFOTYX5w96db$ry>moB^0+~|1?6xWj0c7m*^)Lv}>v$X=Iz`=dEjEMi31o~>?p37w zdw9-n>xhN7w#qqweM;QAQg)QHu&oBrs7%1tNmD_v2Jl`{M00a9$Cy*#$`aV=5pzM@ z2NjEB3_e*O5C*n(sln@kezT}9`S9^$+qSnU*W0&dv?%~(tPv#JrU3-#D|CEXa%Hu2 zizq(3Gyo0M0dnDc5Dl@ldlY?Gs;q;FbTgWHh*#a9WD7r`C)-_AT)piEt zpdtEM%Wj9@x?EIvW^(;{5dVAz2h*3jY26vD2Hp*Rb@u1$R|=|4+er6G)s-Zo2vzTx zqVRp$CqJ}v)5gJW$ubNT8_KNw=)$h;2%sC)2Lp@Kx9ggvm3LKLD{RLcmxr}XZ))C5 zY4bdoq}<}es=9~L@oIB<1U?}ixtB>=DthKp)Jz7rTwyamXku`R z{62r*C?dwHyy=Lf>|m{B!M+S&M2%XBd7d93T!7W+HAH*Y>k$@oVBpgCtY{rt*&Zyb zAN3lkuS#kGehy#i-KqshLE5JBRjSiP8$&a(b_!91RNso5pxs4anI-v|K6{>K$qha=fJvrfeyKVjUq)9D*@1-ObP>cRR&lZA+z%~>yHSuOi!R=i}z z9+skI=~N6K3H3OfBr&8Bjnm#AdHp@%AM>}y{R*H@1|jx@Zp@|HC6dt;kC{eH@<(r1%ikJ__@bZ zO>_zNs!scvS~Ot8*tNa-0wT-x=T`&St?3rv1A9beo9xO~)w`x)OA`QM>pdL8jo(-t z?CI@d#*aP}^{?ipAR#dy=qdSJBrgT*5?)*XSNiA|x5nIxFVDyDY_u&s#UzgYxaU}8 zTJ-+1DgWDnU6leM2x@d~A-5))r*K8=*@f#pI%1{^yqW76)ImZK_aDKD}=zKB!<(4QPqgMPty4lYZsXL4G^XJcLHPJF3 zFe-xq&O6uqO4U}fZ64z46aanVsySxSDdrj*+`E~gvv69;rv7?!QW(+H&vQdyI)MkcX7i1qoW@&?XV z5E_>~`_pQ|gHAD=!Q=to7klISoIfF1X&hUg=A*W>mfI9w;>Qu)ib~Y#wlQA`Dt68F7q3 ze`^K?7*wQ?7&c%DdK+(2g^Vq%x^Coe)>R6v_KpXYPj+r)7jDpsJ#lBnT1FG^67ee; zb&<`f{Qs!?%7Cc0wrxo<2$gPILJ$~QO1e{0LJ$}lq?-{H1O%kJVI-tW8a&e74T{ns zT>{_Q7zfY8d7k(0_iuEDnZ4Iu>(1-Gt~H4(A-W~$0+)<+h}xPv<+^k((p6Rsr6QPt z78=aLxO`#WVPh}GZrI4cT1!hSxt2sdy`o~Yx6dRKm$fL~P9y-(3-&Hv*m8%7iBfIN zM;{#R4OD+=ydD5U_5yZKOBGMr?5aKEp+Y0Y-0}zekLeQxc!3hQiA@(DOYI#}2OVjW zCsF-1fB{lZ`nBSUQt&P5%h*exe}Z@(A>Kqdo`ktiq-#A&@nYC`bO=0ncI)U`Jbe9 zhRhFV6UJVi>Xu|Iiq6@N3=E`($XE+BC%XGDUVKg9I*C-g4^yi5r=Z*0D$xX#QPdJ0 zaXitc^*HCdeD=N-zZYh?2d92c@kOqr5(?&qQY=zNP;lBsOg=89;KEt;tC3GXV6$_S z!$w3Eg^|6bz-2~f5C$C>rzmCt#X1$BugPI8(>9n`OG)}9d59S{g_XK_4u0khuc?J; z7d^DHy?Tbio#}qCdo7CZr=v__CXfcIolbPtqC>2%AR1la?7<8O{9y};djXgVvlB$No;(}hftS10*}N>7a7dXuGWC-K zwge!u%(44Vpj-dW0KBiE>rA#QQwH_Lvd+oGiGPuWNdV!ndj%ONM*Q7?^?+AB?%A!8bPIZUYP=1JZ&OxJ-8F- z0|mc*-42h4xHDE}%Yw|w$Y36_beZ#&;Y(>Sf^^&A#TT@F_N(4iU*8T9$1Y0VzP-<$ zwjW()!QLxkT?|rE$0UO|3CRPV$#g!`<2t{$7GiLgg7o?cI+EXoOa27g3mfx&nS>rP zhFzoifNU9gPMT&ysq5m!y-5Z>z6qs1uV+z7;}c-0bctM@UG=Y_u2zEBpeHl!vA5t4 zIug@YRkBn+6&fR(?b*tS(d!G3%m_7a*?H-?5z`yGqLK5FM5}P_N8}#3-hj; zTc#-$@uV45%<4638Cj9Nzxv!|d(56P6SAo^826RGgdU(Xcq|?wdnU|D)V&D3c@KWaVeiRUVZ&Ar>MhqqHo_xPwl2L zg~K=BeFux>5vM_rvA5xKmt`MdBQN_@pRF^h*f_2QBzy%^&~Q%Y_ETF>{BjsD;W7h` zTo#T&-01_Jbj(YoR2hyeJTK!if#yIvO?0!*q|!$2=l1>t0bB&Fu(0qN*h0;KCfWk^ z%qT7|nk|m@h>>ZQ6UE}{g6;dcwCDWgF`~_3#3^DN*lB?buYNk(9_$rXIP{!2 zfBj5d1VFF(ns+JeR6aL;$qTdiWDtq+SOlMYm1DHFC;E4qJv`Y0EPQG!f? zXUED*dWc+e_kj$Xmu9Dr3{UrHQcvcn`_Tc!LFvt{(I_uUq5td-2$*|E6)o?h@?L<; z1<8~=HgwtPi9fu-x7#G^V%{)<213qhCEE$v*w@loa5f*nuoS88G_jRd+R@YNI&v_w zk!Chejc1M;8cLv`n%knjhU2!JkD8)zGg&b#CDl+}^0v5`)cF$g{n7Ym-|vIIuhCuB zc)u>g2RQH-pg~hu=i455n$c4asA+t2T^J3e&~&&^{P1wQnw{=W0`NgH_dZUyMOQxS zT^|oiPGCdiWck}f&oCX{QRsnpA z%FQsA*jN5$AkV$!{W&FA#wRS1*&mCbb0V5T>eEtX=S1Qbkka7nrT8*wYN{)|GGez{ zoI}QLUWohVz+ zSzP~MICSmfcigYZ+lPHmO0L`b(OHK#K66}&PE8jOnJ11WCEE9dAYb21jZRSeAad~| zT&D56Ui@gVpFw>m2vMsBLb7>_XW@VsI``4jrkmY>lDYcKMqwCaSBudLXg0_CswDRkS;m=FCQ~?s+_9VTX5_T*j`&2N7}@ z-}g_rYalmJgiXEF!~rS;??FwW)rNkHjrS5<9L3(_+2J8W_4l;|+Xf1$Ji8@zYgt=E zq|y9e%T?`hJQ^ES#6YrO^&WP26cE%daIf(|X)mix>e(1fO*bW4u0~tbdv-9_GJ#gR zd|}BezNU=z{n~g;iT-pRQ>4Q!V$=xq>6nhd7K2sm4ZTLAaKzLp=DFwN z+miAYo?|)|##nAg&;%fW`de8f`38O4PnTYAOGruW@EFIgezu9+?sBJC9M+S{tsiV3 zB1+eDv1r#|Uf(2&vFfGNvw0+HXqg3*Oc3p;V&3!@{cirD`kQsNYy;wthR*Nw0nZo# z*n?GHUL^S(vjtdd!}R<)MWA%VFxpvo!UeFtF9=gTS%6(wwpk(4a!9;Ld4Eu8;H)(W zf-{OH%teo^w5S*Wt8J!1FmI#tx7xEyV*&@gUha00!0>M3rF`lg+ndhkA$)fPR(piE zb+^~U7f1xiQv#V5Lygj`QP=G;UF7h99}fGNP4MO|vlk7^bDK^pmD@KSDXtwYX!E5} zQ}Mc#Hp#4lBA_QDJn6~Yk7H;Jy0lA7A= z{fYDGV(BO4NqO0>m1X|(OGO3<332hlXFmB)HZGxkeS7_^7k zj7tBn|781QEgW?Z2kOtKTro=o>$0{#Rzq}ER+A3Uv@^q*xqbPc0J!U%~ zE+gZ=&J#fgN@y$sn4lJW$+Z!Z{Cp3de;ZoUd+V8IAbtEb#=4zJlVkTwurS)CLb&BK z%!s$aRb2;wRXqwM*}Y!a%SN|bQSN@Ha$Vm8wT%!Bi^{$Yp#l%U?|=L0xZ_MD^&230 zA6fGXD(r&hgy}P3Q02LmQ?ceF6?xgMr>e+gKxSju2+zOjT+TE;dYu}7fgrhUuY8)X zQwR5A?uRc?CfQ827oDoG;s?9D#Y;bgZmzbO2WH z9J3r0cj`xzS;eM6{QL&u>aQ){JWNB_3?qT;nvVg=-cW~|!CsC#(QJsoa0kewdfp!` zY;ckK4akuIrOH`1=V7Gw_s>Mrap@zHl9Fw%^@rU2p4+@f6^AzGv5UywnpJ)0!Fy}n zE#q+E`__CxO7Gr!PO5VW{et~)!wgMFch86~!>d=T`36gbbZAnMqetZ?U%Td5Qe3?X z3D1^(ExYifV>#+f_r=WkWD)G#{O-SYdKsPlmUIW2>!rEt%>57*{-3x+;u(lTJsE({ z>wLwDa-v7@_tC%0n1CAwYZPFSfpnE+ykKS(X>g0TvmxYrn1%b;+V7Bx4Dk9_+6VJ0 zw-!8$SE2S?#5lS@>w(y ziX%{pG3owX{jKgol5e@qd~ehJ=e=jYM{)7R_*4mGyOsgzSbeY=r)$#=EPE9B#^zRd z`ix#JIZ-%mFH7SIOHNwLRHMT@q|iv+fMQ8B4Tp{TRcuV-tl5LV;*++!fl9>yy7Q3Q z3ov!8IZuckRQ@GM(%7uuh!eyuw@Eh=5LcY4I~NJD^-DX^Hsf}W4kwOGHXg47F=S~= zFm}{+rlR_AGsnOxUUj5vzZawhW6_>f%fNdItX#Yew?fPXOe~=*kaw3vHwksc$Qa%7 zU0l4l>vDLxr?t?_JlL7?kcmvndU$JluteK^{9MXxAIr%3jKwJn#p%+kf_qV>6ccyV z%&NRT5Tm(Le6aFsD}$4h%^KtH&u_gE2}?7aSC;J2nZpe(x9FrO*KnJBDt?y=Wvr%s^+<8 zs`vF+bZItJYLz$wiH8D#M^1C8Mfp3W3v(-yEMMT%(K6AWET9_lVC`e0D{9X>mmP)|q@FSyHFHS}XYL#9e03ZknqwylZ%= zf?iKJ&A(&y^a9T7FPq^z0K|%p;kNza&0p64|zG zUF>Dj8DNU3hnKIDXrY>A_1c`R%aq)}{Ml6g!&W0^iAwj96kDlp$8!qX7^Nr84Q|#( zyBXgCwCQDPbD39Mmu0lH;^0xcK0fDz=c^LWy2g?A=>efCeab-A&2KUzgYT`8j+QfR zo=R=Q35wySgSoX5ZL#Ki(?%}F2`D!}*@}haGA1tNqlI^-vCL0^BuWzE9OAq}YuTlIy&#?m!l2_rPo@|06jP1&7NlkU|Pij6C+im-3Y=7Dc?x#UVwYEz~=y zdOudteVr65iG+2-Z9vZB1yXOpe871r=%!?=t0)#;O6y$L@#NYGyOLJgB{SX@{myIcJ=nPQ{FQ zrgWz{o!MaYxrcvZdj04C*lYNlPOY73IIE7;&VEFiR-WXpi#Os&COu~H-=6dIZ_i_* z{nb{6YAtYno!W}``I7~>Db5|dPuoMH+?C)7q|rZjM?xqTT&MPuD0$an*F2iIef?Q& z(%uBrwkMy=R&UAyVvSkB2bxi|#!Nr~ssN^>(zCemIo*K;$F)Xv`)V?B3%2l_*L$-F zF6YNkMX0?uZ8Fj;N*#zgj;_FJTa|`-V*^nw6aL9$4oJnp>tD*@S6b{nUCA1`eb9FH z^I^`?d}VQ7an$+q>wOw53b3%yIv!>1LrP1k2B$9nBZecr6zhKK&G+rE4 zyW{96oOCpPd0xjShtS2e9;7E^xu)1@0t4lOqkcgAy<_F|;SD_86lgEUVpmCz8$H~O>mFakHahKC+NXKgrbq*`G zr3Se5ZE~MoagVunr*o>QnE3%*tzgZ0ch6TAZOkQk$@Tr@^#VQ+WV5mc%y@K&kge(> zdFu5i+-@!mWN&=-u6dZ3lkCrQ^W*lS(fHP1lzPi+7W(TVhi$DIC6+nJBg_mEl%*_> zs;zLH^V)xKzWolI|F+-Y)nmF`m!@k&?_D$Jv`dyF1(po$W`O>aP+@CHzT)!?m;E+W zn#K+mZ-1redLqp>#{=!{Aum-Jl$Op#FLFs#<7yZwwOb(z)JrJ-LoK%Bk%Z!n2!? zmg|=mi07HIn>DAGF&Ex}sWKmBmsJw3Q)7j{qlT_qI)+W!qakpP(=2;df}?Mmk*|oGEIEh#-#gxa92D_pmSDwxwPNx1-P^po`Z& zSJpTl=`f*1gDYPnmfIk--v%%jNseyhr_tx-6u`lG8mK&~S9o+0V@?T1b`=ySPb<(bLdem^O+j z9-Eo#+Q&0=zNhklr+#>Vi6ufXc?k=!mt1?mfIs#N-vmq*wHzu*Z2k?y8o9agOpAo5 z6pEAf>qoVm+7z9+U{S}XIdCKWCS~1-p3PZKbnD!YANerN00jq21oG)H&H=H>Gu_Wu zE~n_Jzx;7KrVA8Y*H0kM6Kv1>k+93#B>o3(@C<6#oj|*L+(N>_@Mkm<5-k+UySL{_k%){T1Fz)bw-;A*#PjhTs^Aa62Cz0`9=8EOZ+FIG+kLmYK{) z;;6DdMcOSfZ7oM%7sTPd_IBne`#5R+EG6T@C#Y(+qj|;Yf5KGT@Q7KRvaC0JMmO7? zs8#Hi=|kEy)}(@?La$wAq6utc9}45CuXL7t(iGGu-M@{j8@APmW%9R)-0KPBYk02f zM@p}&-4I5c%*Q}}g%ki%A568X{Sh&pbPqq@pcX>qM0?r9G<7?&U=Bt;fx$5dv~FBFqw9X6* zX>?~laBkL?>@%g18(eYs=4>Xo#FpNINxf?sjEtTqIwlEK{D;{T`ONm|ro*Mhhcd;d zY+m_mmY=?|i`fk<8^AQObUcA-3kum#9b=fJ?IudkI7av*y1cw4)c#^d?Dq|&21{Wc zuk-TrC4k1{vd!0LzB!?#JLogccT}FS)*tNCeF>JWwtd+hG|)ANPWKohDOc6R;X2Xy z<^P63*y^mE#^aD#vD?g2%SU4PcUSB8D|NZaml{-0+Ydj6Pqa+8amTJ4X3h3eu2deS z;u9{jdQCXYBnL(IeT+{-wUbI{cpG_tJbFL1r`k6shPHBlnqxvk_hzTwXrYl;#Zm1w z!#kfWDPr4mFW*CPO*aMgDT7s{{4<*G@>4&Mi>DY3F;q~955Dft@c;;VGC=*V!1f0P zU<`*AG1r5Po>-slO4dL8zMIH(cShh0!V{IEhxv>D{-La{u3lT%P04}AwXX{)DJi43 zlQS}m)d~!J>G!x)Xz4Oq^Z2+|nCIMQ@#2TiiRe>IEo~n*8I^JBJt{HJg!f~YKeBAf z(Qhq1w>3bxfAI^k3W(+;!w(|zSRZBU`F;{veYL(GMIacN{qj`cJHH9h+wAPH zii-OHxf0ad+k1;MBO@bb4-~2`@_7kR>2g6eQ?2mrv~*Jk@4ghkt47ttdGrHwnu5B= zeFy{?TRFoSwHbh7vyE6sM-v`De~RIX+uD~4M?oc8` zHnq!aHx}AkyaJ-&lEvm7;X$976&Z_M&vuz&gL7IpWtBMI1z1XUt7iurN5Xs>>dUv3i)G!Ab8vdetiOoAJa z?=B606w}qMxN+--*wd%al$4d#BI%k_QhL?rpX3tr8vDiYd<_rkO1B=g`s-su3qf_6 zH1J+6`z!7F%y1N~j?cuAPn(K@f`+dqH4vu{FfDd!Sb6=I zD54MtQCIiCFafmS+MUKi0p9Qm{P9@@P_I-4U2csM^}k#@Iy%BZNlDzmAkAi>|L*5K zS+7g{3B@%pvB`v1^db1OKLvOHcoRF=#R=ySc@42Ql0^c7K?{OjTdie2v?aAJsyI0* zNoZ@4#^~EDHWh*&uG*aLJdbxwb`ZUHnf2k7)rrF)w@VOuItwYK_!erJa@d)ZzDdfIpL<5#SgBfGBEpVFuLdCXDYA1HrBx#=wXW`YGA_ee~~Js*CcK z7GaUJ=P%faKNg6)fh-J{Q5y=1i=#1`i-r9CE8l>uBk7b#mRs?hAL&681rTzw;W%f# zdLSFM)#BT$EVH80j)2Cmx}RG`E-_^Zb2Kn6c2+-*_B2_g@VU3oA}~!=ePB zY+*GXwI4kDYz7OXS#(qfH*Ts@7-8#O45L_bB7`qE5;>px!~6-XSQVT7HAu3jzqkN2 z)&T63ThMhr9r@(@&T6~8@*jVR=!d2(Ov*p|{T@vHZ91V3hS#ipZTSki1-8Xf0HO}s zG*4%^|CI3JH^JA53g~y}BVqX+NAqVA60?{;i(Ft~;!J(Web3>iVZ@`Mr451*Y@o4f z$RPuap(BuOu1JG$oUPI9vQSG~GK~HWXpNA7s<#?L=_KkL&?2rZw%H2=_4iuS>e(dU zFF=^~oeFaKCEwv{z#6ElN&xxd`-eMI-Jrl*_Hf?y2Y_Q{t3O(5^%Qghn?VP0DTCK_KICQq@7~+h0W^TN)8z!XM;BwyW-K8e zk3+}m<+6n*09T@*syb`K4dx6OD!x6KDbJRQf~AkM@n|8L_CR0kI)oSo*Z?srtGxH& zvle@hs4);0PwcNuR8G!jgcPXnZ;x89eZb{wY!0OpMhee^&Ku_nDAC^qPz>9?Ew_ga zwm>eo2|yWcGBL?RVz^KdX&YPamkKZ{R2QugH#!j+BEAte&i1PP4n+AE2s^!e71N}c zaF>;I8w0Pd0exQakz(oF1${NBZCb0~S_=r1crW&z^#jP}myVrBP#a0seXmE|md$MV z1|$VLQnkO#4S|};cnsa#svtd51vNDz|E@Er7R{gvsSoOTn;Ae)GP9qJB8NLd7l7&v zOi3%`O90xPfy(6@0;Zls=*=y6P-0B-#k87pWM%;k zE+bP<|0xieu$jRtfw7bTBLZaH&s~37w8mODds8&`hqMGV(QlK_Q)5TAD`6gal@>Fuh zLwR~$p0_ee+xg_`KCnY0_YrQ!T@z2oYA_Dk%6t#EHYgK>-5VOnt^M8y%;rU@ zJz!Cu=&gd z675&Ur=1m#-hU0#h~f89l;IRMXpMM0o>UyrlYkNHGX-kJY|(Dp=o3qvr}VjxdK;6u9HAZ)E0x%JlTXDTOv59cz}#*>xlIn{?#i6fgRN7C$LHy|-$H`dy-dzRgr8?VjFf#=fL zOsmb^HUlVT-oqWe^@mhxap06}Whc1KE1_}`yfO&wOz7B1E=YWDdeEBp=Bhi_<;IFiE;0beMM~p zJpc3dD3((TRh^$v!LU{StVX{&XfS>SZHBEfSzMltKt&Z68Q6wSceW)!4Gv~*GP8f* zK=QH9Z52H;@XIYw;y6qGsEhx?K1!NF!Q1HeYV!krmzCc1U@N1nyRIe`;0&etr0tXx zXbm>N6FeEzH_Yr~+mqja1wbWM#TT=%tLO?amEb9jU=M@9T=#&u`lTvdwvf3x9@fr` zhne?Tr+T(P5U!La9c|!$HL&hB7wwPo3LZ0*c8avUI@L(Q9Z{sUR=HaZ1=ai_hL%}@W*wA-h?eBGJ z7IUE>bdvy(xma_!le>KvJ*l7<4r?@QM_}LlY;Ng-V|mq+rpsMt@GE7G9~2LI1<~wq zQKsVi^U_LqF#a|LS96&iP#!seWRo7}oV%0=-J7#c=s zLL+(X4kIIbk2d_KYr+v|({kVvk#Xhb=XVcP0Fd_;tOP}k!LLD*3~!KePR%ZTt@t4F zE=s%DmCk0Pe(5gAkl1U{rzJ0ZHv}O?60WN4@Fi^iu6*|+H{|WD5Sn&OldoWFzX5=& zXlbZ@lnT&C^}7%hXK~TgzE5O;-E@#TD)d9U8Xg$xQLIV2$Ae5uYX-*pP5{qVMEVxg^MYBoLn_T68u16^n;|duT~09b1P$t>BZm-B zP7}5C@)ri^w2B7={R`9u8Ez1Z&YfC(fzULoA2D2=iq6irPt%E$7H;cgJxyR!^goFz z1(RX-?VNybZ;u|_%Uw;2HeXXMS;momvx1Y|c>v}i!vHE+%-lTt+Z(hu#_BjfFY03AXZ8NLG>LuwUG+fdk6LRD^VXL@EN z4Gga)TEF+L{95_Mfeh@r$XL=4&7y*RMnG89XF*S1*x=*V2&OeKm^g4grSl>Q@Wuvt ziiaZy7PIRh5(|akS1>VQe8#`!Ub*qwq(GMv8l_9|Hv>f5!Rt1Fw zObpnQ!kFty_9A&Dqi+R( zV%$Bdy%bt2{b5sOfKvT-x`H>ke zR*&NJ)UvYS`yPEIW6`vvAy!~^imPwqu7fLG95@57FTn8zf$fjc!2onW9$2wXVl4Hu zZU;P!S{0?MFCRI}^6ox~1Y;pC`%5KBh)V!mW3UnjIFVPVpvpm}-N8d_kskhcU^tDt z!N^irTwMI1+4Lt$9YF}_5|V^>IZx+AxO+P=E{*}*UB)boTs@3IlQt?ZDKTko0eG<( z&Aog1?^D6(6tmT5p&ma>A_#(Dpvl5MC|tnh5R6fmJrUEfxiG-(DhA3k5d4I~e)K8` zs54pZ+}pcSC5eI)1v{)jBF!lczxRS2SKij?88i(*jzlm<@d)w(!rn=>NeJ}uwIw|I zJivqoL3JG1b^q~>#r?RMWU>kC!N*=r;8B<{?_H$e>Q(CBk1+UblmQPTiygkh#MGtK z=M3c`W4(_Tf7ZAWV3AZ#29m%b6U4v(S?%P~ikjwVwNYib152z|P8=w2WY~7}`VmM=PbAND z(zqOcpW)+1?n6_Rz})+-AFDtfH!B^1OH@B3?L_;}Y94rBU1<`Sr*H@)HRoA%b*vM1 z8RQLsP5O-}rB#1)!P9yYLk?VyYLEt9xt9>H4vVODn=0p=$wl^evK3oQEa#_@k^wV8#f0zeYHCX(=gXJkQ|)oDmS{8%zvsT^cQXoPR4Va>lL9e3`6 z;r7Xz_-(LeKk&7M>F}k{Gcbf%w{s%LH?BZw7u?C|E(GBObAjT(uHL@}?|%X8 zuk70TI!}KWY}<*U{E=tVTYjiKz`T-CS*>(gL7L=d}>?iBNKp<_o{YtjRN2wQ2 zd*wyN<>looUAJC3u1`0i;ohILA_YC%Op6>7Ft@!|QuS!vn!BVQRECl@%3_+Ija)Y3|(V%=tNBf(L&LJ?|Y)S1s`?(_bYk9Z2?}9!Z01)V7bj*zLf{nt_|+ zLwNC9NEi|7A0PMcRAEaBf@d2RgmP!sEu+ObFcalY3GY_$B)P4)0QIzWRDe1Z3;L1#U4fLN# zvo^q{i-?(aSd4)I7pj1GoQPKrJ zJM!jzTgbv$x-eUYDHj8#Y1XQb4rBq4U&-FSa)tEUBm)A(DkHtC9r1kHumiU-h=k!b znb&d;J?>to&0pS^iZYA?>p(+FI`<=(H>QQgc=kcn;RYw?du2^C!2%a}uApEowsA5F z#RWq8xAs|VPh?Y*7UEoRIltiTj!YaH00LyEVMFJ6yaO2l>L`p2N>-D8dMLYNmBRM# zUc9oL6!|F5ZO;f`m+dUrs$-e->LU{TAQa{9yZ7$yTPRUoA}n(7;@FqXleM10$O~Pq zEZ7A0(aW~>nr#Znk*14DGdH+jysQ1G<#TxM#x0~+yGJ5u<^Ut6UC(;Oan~Bwa7v2{ zY+Da)R26Nd^%_|;3fprA>Dk$YuFl*vG)>|+Cwg}QH=gwLIoEF!s(a36h5(Kf6di4n ze**-^$mhxL2uJr?xZRijcL5(Ox@HbnutN!arqDc!uby!_*53Z&;7 za|7F&nZFri6aBAzqa3kC?S~?Z_&=`cE{`^wnh2ehZ5m5SX zU4bz$$iPM{-SJyC(bF59U3Gy$$!c?U!LFsKEA$f2lWFq9*gU%ulT7kl&2Zq7~zf^ zM8!BYeCS7OdW2+IUtR24 zB5;Y5WY(Ccx&}PB5dTD7e>%(J!E|5wJ(cSq|QI8({H5gFh+10xR zrn;BJgKuGcCx`v%o_7H4+Y3hZ-qWcq5gr#xkvklqS}NSLbaDMR{|Rll^x^ z{$w-$-6Nf$eEodOPY*;t)m>n;>Hihk|13_bOHdc5%CtJ3oNY+ZM>pt*x?1+-A~CJv zibIM%SFPHw_d(M#G&J-nfGkI=y4u^qeRkpY@%x&_Vg!?+;P%&Cp#{kdY^y%A+!LK z5Uu`;TH62Hr}^(;xMTit6TgbLzaR1Mcf-MiCBsGKUn}zOA3WYj&uDg7{s2(@$Cdtm zw-od*Eg$Ee+P*ybOn-a>o>Xo8|M=}+JGtg2-h#T}AMD@i2!3D%Ask6i7fmLCU)a^t zx9%C#Du@8BoJ915iNxyyxCF0P48Ovvf36X=I!4(xmk@wX=1zRL9#Ln(g>wRb^9z^q zdqIC)<{5+~KFDs&`z7_wK3K_Zocq7s9-IaW>LlXU|LqR`SaE(;!0&Y_Ec}Jcsa3t- z598o!DV^Y!K*G@;Gf_4?Dsk ziTi&p8?-RqN+`^VLi3@?6DO{EhbC8Ne4lx#R^w}%O5ADz2F?k%#vjXBr@^mH8_i?# zQtkU3O)dW6TdV&f5O?Aip1})kl{C_$zLUCszX|AnAb?KdcAOE;FEJzz$ z;ICAp|I??v?}I1$|LK<_1{)Wcc}|j%A*Qha2uuKsl7;^DC1e`*A@9V(pEy+>{JqfD zED8@poYWcu(fp}uOOk>Qh+ z@QJnk+13VkJ_x5Eo;tnhqX7A^OEj9P@nE(2lTk-INWZ?Cjnz-RDRac|+I{(?l z@CVid0f$CxNpooRPJCCwB?QL$bn~+g-oRy^{YFm3Y~z$$g4%GhGZ!$=gL+=g1+ZbRetZ4$v9Dge%*78{U4Y3SD}{f;4Gq8;a{1ZBHK3d7 z#MqeN WlFW9kWW>(xuu4g)RXTcP8=9dCsz-$EEK#(xXgga6lsBAp$y=yP)o08q5 zf*yt6nee9nGDr6fkr0*A-QjipmAJ8|)9=HZWo3A{n?+l`9pAQkIXLs``|)S$e{t>p zxX711o5p7sop@KW3Rz=;a%qmRbP(>za{=8EWuvc>53_3QzNmB3tnVP}2Y@ zKGrLb*vMzjYGS3F$FgJm7l1qKsw48~IURG#zj5IPWvi6Dnmo<^cDU$~2gy}bETL2n zS4O(4Eiy`4+L$k({V1maZep0y<=^ZuDuYgS+dJ%aJm$#?DAK!nd55V*#>%_B(dfq& z!#N?R)gH(4FPA}gNsKEZ@`0D6(|6^kT_m?qv-MVEXG$o_;=0?A|8XRL_tfL zcJVEkFn-yK?>9D^^?zuu4x%qeRaj63&$( z=|5``!VE}{Z5!4VuI%|<5|7x&q9s<0)-AcmsS4g}2%SCk@Zb1| z@$68y1RJ|AD@=KfeM|%q^<=0A1%6tnq~w;P?ZwcaH>w1A*v;lHiyw#b_=Pxq6SDoZ+17vBL*R(owd0 zWO*IY?+m9vv5%JWpqwJe4=XYNQ)q<@h9b@QtKfVTzZe#wHTSU*ze_Vz%bQxXU_x|Nz zd`kvK>p{skyfsmxeqcoN(Sw4F)97h$w#YouADZ>Xf@UUY^Nv5AKMh zLWlSjY)eQk77r7!s9|~f@)I3j?gNNsTrA>;QT)&ED6~VQe2FS~--l?$e|I2@HYG~^ z6l1X__cE{-+t<3SkMBMQeLM&iKH5W8r-rVFj@M$d5 z+Bgn7YTOa3N4Q_bPA;Uk5F!ReP#;vaZX_!z->{44{cRpE(LrVhB`^{ko$C~$34Ip^ zBWapjno}||6m)eRdVA-O;2Rx^p`yO(L#{NoB|C-!5|Xs$rFp{Bb|%atY2kf=}j`aQDZq{dbEN?O}i% zEHC$N#jnbPcq;3}sA*`1=3W~N0Y0NoI>+G#bu0SDO4f9RQa@X3y`cTc-E-0mye0Ty z28$LkU-W_g0ztp(R7~(}5?rP`Twt{vV;*k0(qY7@`zRSq+@;Uxw2* z;8yr`&=>b3iqj%#ek%0}WuEDA5n2eVzweBX^0~h_AHHi)F?lRQe6Yo{adXd%H54zalgkWM!6oy0lL|Hu>a~}Y6P^>0=efH(R_3{Hj8`w16|l!xwWn^; zLLnXWq*K5Vxa2rA3p;U+4ljUcOb8UBLV+g)gKR=aWS>v$v3QKj!NF}zV75T20R^0j z2Z-!3pe14KtXBE-y*8LGY77~7wf@c(=K15fU=bmTKXqT)0}2*@uTkP9h-j|B0OH{A zntmSh8!^mZbIFofQPsi`s?i`U35o5$H@VqceCw2VqiXp{OMot+(^Fr>yp%Z7`dK#m zN!mBj*LpvG>+gm6^@m_V$LkrEwdQ=IvAQvb=Kzx7Eat>%Zo2r&H z&#Z$aorUD=5A!0B(0g8|pT6=CR9?P^PH~*R`urfmJ^1=HsmC!y^~gIjK2`b5NB?)x zZI^BW?YHP0Uc;?WE?XaydG*Z|_wN|Ny47!{hoImGU6a2+ap#H@6P!4b=9!xn$MO6% zh(@u}u(+6R|GM@+e-zPw<6G^~Ax*)!gFG!A-F=+$vNCmE2YY4D{e3E~=lnjH`hy@& zN+cw99u;59DFOVac0^9D^8$jR5-u*!1~$`w)XVrw&lh43_;)e>`Srh#0QF^z4e8|w z;v?iNF^F{DI1J6Ppvu!FY{RtAE4u6?2_HwH;L!jd9Fv3NdU^nd@V)|={=%(p$9%T0 zK8wvWx}wJkp((KnJ#uRCrB*M^((yYV?}xk4w1Q-l_0KT=pAXVo9YvdpcNh;$??`rx zdktzI*A3MlC4iA3XpS;x@6A||7nmsO$t+X)k$g~(LJPS~Cv^1BFZ;2w)SuD04DA7T zYQyfRMcU2}bOJRx>Nuu1(??M(22AAf=ddEd;kcSSE%f)cJ^89L3cWtoGQZ;k1{tLX^EeHfS* zNf})Cu|tY7RUB-S08A7slBOal=`t!%Qw3_AHLG43hQ5p4a;FAf$ z7+}Vo>>}O*IQc1tPm16Z96#X(2xw1tCKaCAiwDoQ6kS1q8@BVU>!C0v?oGNEBc9-= zv9u9-WD)b8G`w}&PF*a=*K!1Lm{0g$Kap7caj0|9{dqoxh%A8ZmV6(=Dc!r=n4drE z$F(-7&ZFxSpib^`jEL~Q3~n*$%LC?<;w=>fs`o6irJQHkb{qT*D|nte{k<*Q@MpUg z@JOBxUcxR83Je4Yl>oRU6&bh=P#Lr|M2eYB2<&~mh|^%+{ge7X3OarTI~*s0f13uL! zr_kAB4FyQPqK_z6m(U#sR^z|eXex}CuQ?>jHfI%A?obN#Uw@1SJd;OFUc-Qt_Fwo^ zmn`#UuR*>8G?Vh&C_W1OeY^j7wRx=k11d8Lnh*6|mBmAgdb3m{LyOFMXTg|uPnmjy zt(auEt%=*c%_8$?HnS8nDL>#4t}~tG8>uf0C0;16tTfYDC@<*9Fm$zF$+ShSg4}^E zdQ_Rp+=1kZ=)=ur!~TP%$ts!1*i_ej{~+VL(QH{v+w{v1Z4-8n%g&&(3zCbCugUBG zxVWGH#Jhu7%U7Ve(-=G^jGPWS|X+xQ7RJ_C*F%%|Eu`?{kBgJWrlGp54#?3StH;)4+ZU$!{>z71UppWw^>n2EFo6&01m z-E+#tXAcSq*CQv@bB3shhzwK9EUuf`3}eS;f7mnasaLlzE(-k&WdAm&N7DF$WslOI z*f(WI4hsh45po|KB8*L%1JR-ss)vi(_S`IKFF9B9y1Xl%r{qvoL0+L@WGrDeQ1x{S z3s)P0;be!0Qj97W0Cf2Qb{U)sRwG0tdy|c=|6Fe4{OCjGcfx#2shW=}kb*=0U|a*M zzcSXGmb-oE-MhQ_GtjV|-s>I?l6GNik4flHHls>m_DDkHLzGP&6Sa^*T=d57~J$R@*Wbb zgc={vciB1*X)HL~8i{+l81vW%(KSOPBUvAci(9=4QAMHpINhUOlK&_q zWVZNndPX6oKP5fraVbwWvlW0?vUhyZ#ZIc@9)jS*ODY;@>e8HNk7Hj|AfX5gwyi}F zJglY5og&U{@@grNA&+8{MOxQJlTX@1NIFnRgKm;8aPQqOaw1Fr znf$(3S=!|a$_Kfv=<@X1`C_#9pFHf8x(on?tddSq1oV$F#BXRIu8`ihveabZFML7* zdy?Cv+!f^7X>Bqo@<2C9tzypCpv>|a9S)|(BIxOYzIN){gc@|6!6iV2qPfWL+pbyG zR8xCBWg=1VNn`GZch>j<0VZojlLwFsi;;x?_yCu8=aQ+zq0SU>}g;d2C-`vTfd7 zZg0`l+FTe$e{KVxe9Q5{YPuRFj3bdXBq{0I6{S4a5inzEmAuF>ymYbLeu6yGp}9l9 zch;(%4Zy4L5ige?nq{RUf~+wmn>*z%Ph`fQOM*uS64MA8_Z^H@yZVNh=c3Xn=LEX8 zRJ}PEDdD0V%_Kh$C3FbyOP%QpY!<3|d(kYG5giA&BI*p8asI_7>zt?GU#5e6{F9kl zFI88ZIXWPM@FuR(Sv-mLCdSP*3_CRvx?=MoS~~Zg+VlC1ENG;BkMm>PyK=6%TlyvR zVf9w3q*B-i-M}R=diMX?`|hZwwr*cVI4anwN)->ii}Y$ikY1%1fq+OA48=e&7LXbc zsR}_#=)FowP)d+0AiWAg=#dsm2)qq`9`Ss9@7y=W8{>{U#^o=RwfCNV{?=S`t;IRe zP!cMiaUb0WqVH9@v#8x)J!Td0{syMGntLEma$*n0LxCdxxFQ8mF!=@Se*MOBij--a z=xjYv(cM|C=pixQc!jM}#hq?7ke^g#i~#I>i#RJM+G_4Sse4h_=|;Y~Lgfik-5QiW zo3T!}bv!uWll=M5l%EX@PEtXaa~&Wv?MRKGtdFyupDzN2%W|K@@mbH+(}z3h5fWMQ z>=si?V7p9ya;R*aJUKIwwO}jp!!ywKNla|6z=ngSXFQ@v*EfIh_|s-cj~#_NcT` zoqH$6V@qE0L`qm8y}fmFUr0u`SxQ0b&3`TK{|y-f{pX_21H2Wh5-EAWb}q%o_(isW zfPfI@x{;UX`XZw_jLRG%BjvVPyrfoGliD9$II2NX-;9m~H#>(FPpH58cKHe$XqwZG zo)(uOIe0}W(P82e*C$i%N?LE#8dXwLv5Gy+L8=TC!{qODp81zo@45DQ)-bfa|2NzK zSHPW5oyoSNCuT%`9^nA%;S38OFm7xr;2Zz5{IJHM*i! z7xEsswS_#c&+cjnu!~*GopvEt#BYaK(VW@+#@=y)f+U>v98f0LH9R{+AqVZ{b<8Gc zyS7YfKYV$ZW3DNm3FJ)e8;OQ7Kz&Noul|H~tKX5EKoXd8Dt%yLRX1b&35y;{tw znepud-EH#ENGPVN1%R%cForqiZRE(3(lhYh9QQ}?dj6~?T=7g4qZGMyWy!QH2o&is zTvgBOzAK3YzXtBpn6c&qVtA*Jj2aNbEO1l8btD8J7Xr6Ml~5^Phr+%YUgW&?GRe|c zp*rEwd<<)RQD>?f_ECSSqkc8fL7qjM^n$aOQYOlfC9;CojODnp3J4LIbAz_B4&)(x zo%-Zmd5B3g(}mFz|2h!3z+>g2B@DP*y&vru@p`Thl>9oUjD#~Ah*-sD8kSpAN-Zyi zE=ais>q0l}_Fli|S0~JYjq$3xe7gI!{vlv*yf^#Ak-t#cZ^m>Y@`>#I-qdUd^vQHi z-XWgK_4$bL@vB`gl0LF_fV!LX)T0L)ibY`5mrTNK$n=3d@XE>$0@v*LGBPgEH_HV{ z8bGu6nxs(G5D?G-*@Kns2XRgIz?5pcig7CS>cZs@R9_+S(-#j|_7COnr>|=_SuS5zf4tTt8YHTt zt!*PyCs4{Xjtfs3zU%U0t=iHzRpKUCuS6ur5M_8t5_H4zDW$=?qcj=ppFf@{Pv3Ny zF7L^A1Q1^d2^)T~=qY>llYqFIfVd;%Yir195fydibEgxPREnN*O8#lwRd}v(JcJR< zs-LK4>?J;G zE-(HtWH^w5KdwK^%O(>nXJe9-u~Hl@Y8xwvk? zVqXLMIkQ3f3D^v_@$mt}vpW+h@z93)J0?=Z?Ea|{THD?NSun(5e%BeNJ~j_@yux;E z?yyJ1&Qm<+oxdkMpQPn-jF&s{e)nD7q52&;{*uxjey82LUv^Z)N>~T(zV3N&o|e?; zA&&*A*oo(C^7t?x4ffdBmX_TFg3Ug$0vXVWvAmg@`W#hcTXAIauL_s;M(FEcVSc8rWLF)kPmbeJ{LHWyA< zqFhi{vIY~n7pAiZqsXL+OH040*cU%S=H%prldx$I#HI4Z+u@db?n~7PFh_u9yJCdE zS_m?=l!E*Bm8`T!V?zVt9Q@AF!@~JB3W1@fQb|_BT`%N=coz&1%l64K z6eQD)Hl@=81MUv+Vw@o=CF?%A*hbYyx4TzSIcvqo!PZE8kvs2FmuZ5zju^Y|j&AoC zYv!pqCA-H=YfV^ChICehPlAq7He%MMJt-7ldgJqrotD|~_5`*WlW*9r72al#s8rj# z38R%_#&?ss1`1O}I+rgo#@jWOfhM^Y-qX&4Ci|~)GxnGGSAmMo>)8rJbwSs;M7rPs znaQ0oDXTHktxO~S-aOPN&#}l`Sf4zBq%)kzDI+}r{PW3?qa?q6;l9h#`dILxr46uYKt*URB*ej_JPjSSVe_g_I^lTx8Bu{otcf#{?yLD6?ZQQ#gikz zkhWY=p>^eyg2%~IA@X{)<>``NU#GX>iKSEwkz{&xQz2ePjm|Ie*Sc=l8e($e>zlQo zT&H2A8P>=f&j=5{bKTx6vF;0RJWcpy76(WrTTs(kv^`4O+R7KcyqQ3JRpICifqSiobb2*v)L1DEh0pyjSVV97%D65Xuscw_xl6#=5COdeGJTd<~ ziwh}f;YH@mH07&`PDhULyt{Mjx-JuS_yVzY;gyYx5{#O0P!tQJ+I(oMsnC=*7AWn= zpCZ#N2|TmB(tk=s(YtxeDAJ~<5kIX8nVg$VPFD2FbF-qCI>v!yT5`I-uL#{-AWJtf z8p~Wcj73rymFnj{0|buEq$lfv=+WCKUPUA1l?6w;j8NX1q&aauOgIVumTcRsO>asK zy!iQT6qS-_2~=&%wPX(Pmr3Er%pL;gUztaAmz-zOd{5$a5gH1wprBFTIH;Dvvc3RQOQeCY^dn9B_})nvaMg%c8Ujfjpr7*yHaHcz%+;_OeQ1Xp*1*eTv~ZW3TB z$_#owtcRG&?@#~13ou#pi2pFZ`F$j}R$}P|sZ@<$;yAG?kiXIKqF|%I-?Hh;jL>e9 z3L$U$)S=P@?wg3^x;fnm@7JsiU62h(*YVCHO3IRnM-1zYBD5R9j1OXS6?&8^LOp@rwwf^fu#I^ekadh5H^N9gI9jOZYx4qQDsh^uT!S$ng$TU))DSe&lc9P3+es{`Fs4{Gbju36Vm1P?-Um=4(DZLIIKi5K5T#L zzd~%QgkEHRjI+1utb0i4Hr@V`*@sHDLofHtGZ~jIHm;{O_uj6+ytj>KzT)Ms#hav@04=TWZZB-qDdtL8=;wOXs#uNKVJ5+) zj?RJ9S-qIR$hq-wjDB6kn(?^US=sYA;zWCW<*2(?)wiUgvz~cYw#<>oT~d@678ng( zFnCYbnXaYy!Nm|$HOP1<^9VBsuw{fF`a$A&6+8kkgwUFHxy=Aemijw+-XN3Jj5$kJ zQF;YqdwYk+)>_irdApY=iVS(;%A9dUT|v(8*%m#U-&g_rA2&C<3KIjZ1W?y(q}L@n zJX=}}cE^qtEb611a77P0YQt?3%4dd(CR)*6iK9;fWzCf?@7q7;O(4F2#5v;*i!bt< z6b!yY!T0H;kbR%MY#EqoWpFJUeQ7&J@=*8?L#Kp3Zk$rzW` z^{{D75@Ugj80+N&eWd!t-E4y2$UsKNJ;x{za8rBo_Iz3>-WgAkGVS$YdTC+;*dogm zQD`X8i61*{9=y;Ty`Iggu}`#LJmAOo#H=r={vL(*EgZ<9Z(X(5qhAerTsxhaAyE14 z>9_+WdTU|ayB`i4a{0L7w_O+&Ro&6kBO7L14mjdH_Ar}>>48$|9Z!9HSSK*)9p6E3 zI+&F?eS9vy!ibZe2HTYe36C14_x5yNK=1gY<)%fr_tO;2m1cTJ#i%Xk%i(G76eTR= zO&PVZq}AiyW>+d6Miw1CJ!fWgCUQj`W;F6tzemQHC3=)nP+ zc*CBuurvAI7Y=o)W!%S#WFkmqOfF}#7fF_vE3G7a&tc< zeU4dC!8d}>F534z5a(O-AoZYjzRkz9=U4>2$=WLL1650mj*d&jUbN^UVHMobGvxjG zjH7WUW4Xx6`<1gs=&T#WFBw8kJ=xnXgPkDLdhLD7667T#{|c)uoVld-OKmmsr2OdF z*&mQ*pK=QqlFB&zH?isaxR-$JFgnc%q0Uf3T7eTWN=aDRWW_JKq3&eJGf4RSY(v8q zos_%kHBesJ?99$HuFhAia+MzU-l=t*e^V$bv$YYpG3iX+ML=(NQjAab7hVRmbUU=h zi+Q8gU?+kZQz(UNdB(Z8*^GX99R29#TSR1gbJQZ8h`R%VZZL|UD@v?I8W|(^TD7)A z*mAHCRNylisC1n{PsFBHf=Ud$pEjn+kg8G-GBuH( zjxay@`qRXOqjME6zxQI%mNamPbuv;d7)T)vz=4dce1qXnMexHtxq}S7zB_0TPd>$fs{-3AAMd+L^AI-hbiF!|_0v+&grE6i-S5VY-WLRl2YKCSIBHLo+CQ6Q&@ zy-OGEOKA^9M)H#@-|J3=l02^ zLf_3Y&mD~1MGD!?0M89%9#Sd5V5gS3!52NpxYV?}<$m5`XnwK(Pxl;C`UrLD49`W2 zSfq%3klrl2D(4gs7x$)SE|h}8^)B>63vB;xHS&qPqz!Opdddc|x&Tc3%PfAh;Epqm zyq1m*BVRx7gCItWLm*;1rEeuc#ckfK3}pc2jE%QX zXv+$Ty~cGLPVd1Poqls)_D$MADbesu(CzbwnfVauRKhjjdSnOa$h@A0b1(!4@l>kF z-}uyBPhhxj6KI(#dzEW?qN+%vtvv-KdpDPVp}9lW322^y-Q#x|VvDZZhC{Raf<)Gd zNiocF*gi|>u(PjD3o~yKa2Sy#HcrH=BpE@Rf2QY=@B`76>>)ZPrjWzteHP*;O*RWR z`{$`dyduyZitDry#};lHjZerqZ9as00j<1mg>iMT5pJYfyg`-wARcMChs(lw1hH%a zT<%azQ)K=2YhiVPDO#TeBmmZUn3y?>zBkaXq~6qymk@ZrA+Hzze?smR8Ea!*$6`~YIE`-F-=ywSj=7QXNHBvT=nvn##ZUL0*5;qQL98zL*L10#vw*3 z1*7=Q-qsBjP-hm+89yP|GexUmOM0_9;Q1iPi8gkTs;i9yhKsqiJIoJf0Reklabnr1nRGSf3 z8$9O&K7QE(D)FJ+_G0y2_M-J46;xQ`Rg7!jeY3jaz3dLkfBh?!93DaMxCj(iSymDnxi8 z5PTBDh!E&6fPQ`Ua1@LP1nsWKd0{oYhUdnoR}jM#mRtSUIy-02n@nLH754hx*i8Kf z!(J4JjjRV$Rc^N`onB0~XT;Z6nixd3fRUT?%%E2>rCW12Q7zHf15>HF?YTX$eubeGNZv z7qCm*{8&CeK4PxzesO3ugxMWLP&N1NFkI@%5(qjuLBAzETI90Xw6^5$i*J_+dOV5w z5)$fRvQp?dLYB79?Ey?SK;$VF=k0LZ;Gv5w9gRYxWUM31;wA+YB7JoAu@_Bh&g z=zMa=$iO8uaY$|wf{2u&D3dM_hpwX58-nt7E7NoGaAmVT2vqKsx;eJ=p{o7R}Wt5u&T+Yg_A%d_cJuq~nFmE7PDGy5ufkICdo+-IqOZ`;1$MX;1DaUOr( z5CiGV-l*=a5=VV6!v(28c&}?`bKZO+uR#8GGt{HOeixf7{*^j!S~&f3tM-zVd4IYNExL%4wWvN62M@pb8|!<~ z7#4UzNyUA8F;bxM7@M}dg708)2IcsafX|=qw!%8sM1yDa9Jh)CmevG_c6o^;XN z(F*%TfkqryLs2Ce zTmZIGg0`Q;FwveCwxK%4Y=a&ndx#wzy|gzi6BU3;qgFC`Ceq#7n{80#mpf0}x|B&82P-%NNztZ)#FDq|$w&#!!(el(Z|y=lIWLp#Cf zO)&5Fa`bw2h}FI%HSr`*xtgFaul?sTl%O}MI%+o4b1)f>VM7qSnLLG@@bh20Jb|vt zJzIvY@$P9uHkBYL zum^=>u7bCO2_Mb&YJB^`*w4E^&As*(Q0Ef7$l3a?Y+fg1y$?vp{DFKBBaqVTZ z=tIdqIG9sqZ`Lirdr9Fp&JGUwNelxQFjP@_WbQCf{X1X+aRC{a*Iyy|r=);Ns4T1= z&L2GJR4qX>M69h{XI=a`VWdl}8PaJFJ{iG|)t6gt?7Z)cqZ^Ai5j7zNPCTG)zMI|p z2GUTq;;&Lv;`W?k)%$MAHsSOY6D#XSXrKOwf_25wcBwf-k*f_R0)Q|zSC$-V50F#S zc;3LJ-PB94ei3Q9SNT&^$E>~nAs9MQl2(X2W*oy5m^0|!P_g#L%GusHm`R3v)L)I` z+G-++aoOTa4ulEgSplO93yup9hfcapbm#dRe+kKun=au#$9WuWLrN|$ii{w6^Ot}wTbJ`Zu)GI>;7?0NTeY&JcB)yBT>HJ<;8T7J=fYYX>o zMW7<(8*MhjLhH}Y{ec5nLbh%MFR`6et- zECXju$A{xN5=e$s$H-Hrl~Yq$t- zvG%`vfd$aTfpYLQa~NKZl1{YzL+v01%MeBKq&uav!#XlEu`)DEX8-^a(Ob(5Z*^5PmG8Ea3_&6%fYxlHu)nXX* z)~qUdnog@HR?A41I(}NG67N9SfF4;x2wgnMd@9K$9%1^uL0)&kca-*&RR~ldg6Efd zA%`fb_J+lH3fEGSN^ax&m$B1xy>)@}&7U}E1p?XnAI)d!^9NP;M_WSN%f1mB)CTc$ z`f6%m8eOdu+3EE0saChK7^_10AWJXoCXt`%G8tsOl=|796;Ih7r}D)*)73qt#duufxwzJ2u7`9rAK2D-Wf7i z*<0akZx1itzBO>RGz35|$+x?$AyGX@Z(oRQeA~)oev49^NKy$HCkq*r#mKbb)HXkE zT3-v}BU5X6>*LKNEtDl3FK#z0?=*Rdgqlq-%vMiP#qCjwt$t}4<$LyKN{ z(e_MO`g=bhG+faKl20QeMCM1?;5XmX)FM#?=}e^&hiuIa=Jn%pELWyZ7l69_`rhK1 zHu(&pE6BBuDqD>Ls6LX1?(+B;jMlXGo1NY*U>c@t3sElG1tSen6#lrdWITJA0qgppD%Lza9sb@8y@hK}S zUs`w?7ow@KCYyI*`XNd)4_E+~lgl5=+1Z*k>_!Cd&z#~W`O12# zBjG+T`bL`>!_(_<#u*_1-6@0#NJj3p(>kAdD|5{}#OT%HVPZlJ~hB}vem6k|8q zFRMrLX+|pCVgAHY+tkx7bx17uo#3&Hve2^oke##SwNwU6rc%nL> z24K#~t7-ZvzU!}P_J4*g@)-d7Q>NBI9HP$OpGvkg9<1_9@RPjxb`x6$+tVLkhK9O2AxjS{<;Yvqkj$WAX1>FS zsb1eDmjn4s!Ar2kCnwBc&_0 zZvQnl@hpr>Ck->CgRRfD(wLseW<6Yl>9dWZrw)DTUvJ<01*Uu+I0e>pZ(+DfDjK+o z`P1+PRjsSB6#s$EUnUMyrpa$n{GTC3*nsll+k@K9N7o=j<}AA!*0khGFWa-iXY2Gz#ze8Qko$jeUsgcvzRgzw6<}siiz?!ss1o)K~~aEc#~7* z+LSl5l0TWwsavfg|ELl9esYHa!1W!DE7l%L&_Or=`^sTq6Zlok#^~o<r832KF`|3?|r+@m_D z`jGD7aNI9*`wx+V4oXr|oA>D;|1}n*Q|6EEw8((XzKC5rGbNgPSULQOd_B1?*xI-r zua5agIUglqBR7~zz9rcG&%$fk3&;HvSswq$rhLF&-XPV9|3@y<9rogY4C?WJA#ZR3 z#`9k!G(Bg2afb1LWC`TF&`0-pq4V#6!olMf^mM0>hSVM)Juv16TQx6ffwhL0z7$9P zQZbz!30K&7sj+h?na8L|XGJdhFbM>ogGKU0)vfgs?NDb^nr)DDFXQb0Q30?*>b`1w z+Z4mc$17rEEfZaH53Nu9Mklv+O$!yZM7HFw(!kglNtR^V`?<``4{+}ao=Wqr);G>C z^?0Hd^+$>O?`T0^=YLq9mV3N76SX2 zhAExd&ItX?aYdj?956RKM7LiO^Rp7w)ASBRjPSx;i@^iI-j9Vq2 z4~m>-gmQ8%fo`5~b**7E?_## z`M8C5`Mmz?bryqRw+v}U^lvUD?7df-qY8M91Xa`sC0X+-&SG?sx4GFBP_Oc?cRYAx zd+{jGW&Q4ON478ek-r(|p%$;%LDEe@BOOvA(jA-LAhGF??r&}I zQJ?3$=e+0dcU>=Dz+$a6=Nxm6G43(OJ?HkVoQyaYCNU-g0s@w#gopwH0@?ur0&)@h z4d4zL>E#Oqgj1fi@!R)f`WP`nh)k(AEf1`cow<# zr=LE_Bg8fzq%Q6g_rCGN^Xeuk;VTrJ}b}s%|!LflGMkBseOIK`c7KNuShB7 z+wG6pF8YSgwj$&5Nt0TzRT9+5@p6JFmbNu|hb0JxKrT{j=>|$Qug_E{m0)BD(F`hL z)Oq{zioSj~chTNQXump(kZ8}2XvwS@kSBH%%cc%NyH`Y5E*IG+)gPiwtq}0d!qPfI zHZ=I&!V9jrjC-10aX8)igEsO@)b{KL#cxGF3!N4ReZ}aA$VJc_Mv_!uH?(TM5tMW! zQGso&OuZzAv)UeG$8c!{Dia;t)~f(BGAWW1<_yFw(%+bGeg5^fPWkJHKUy*HR?qb6 zP|gr9LqU+YnMq%Tcqm%~J~pcbq({g(t*a$UBz$~JR7cf?6E+mG@ow>(`JP?rgC|-v zReqM~58gmRTH6*{T5~JI#XEi&;17F!Ul~99^vT?N8wnS+8iZ2fb9?mGBLYKYNlh2< zle4T%3}jCbnz|s07JFnfUI#Lt!chpC=q!r_W!hb0BHsRoNT_lbb`p7BwLaaxGk(5! zT(kEVcyTf8=WG_69<20RO3oriOEko5BY=BeDUTc$h9J|(6Th|ZXO=RNi@N*#)_pPD zmN#M5800>ubMo8wQ1VH7zkzSmpBKowN|@43QEAaS;uguiyVKS9!-#a*=b(ih&&2cI zV{0@k_wUWs0_E6D^Dm}Qs%Ob<$+5}NO%4JSioV$>uNQdZ!{ktO7ql|O?XuFMUq*w1 zS|C&v$>n^31s_>YjJ$>$jBgpm@C@(npG~p+U<|NYfFz7jS_+H?k+sOY3@mYP`Vyv( zHKx8^5)?Sv@9)dO{#0|yM}Vt8Y$5UZD1l2kUa5Mt+PTK7f6v!b{$+!?zg3NPejiX+&f5*R6yNBsYb1Z2uE#uhldm=jKK^jvj|gC z!$he@2NsBF!3a;eRMVn~J{nc83i;R|lT~Mg_;Qupzymc})OX%K`yO$Jnylkl=9}B`uNVb8BVyX!4R0!hp81iA zgx$}Ha9-fHCt7)?1KN_mmG$Aq$9v|1Vb8`NpoNOy_e+`*a$_+)BTAPVz%0j{4jui_ z*3Y(VRE`V|I?zWL`ttrg&63}z$IOKOS^DwsNfZ5J^cU3#RDwCac%M*?`Z^hG*;4j4 zsaN0BYQb}*WNyltbJ(Uia`&Xu39OnW+NpEJxp*r`_x^SgMgt1@OQie3kCNq{J)pCs zBLyY-)a6EPCrD7UzC94JsU}f;GyXOHE6vx?uY6x=^?7#qKgEeq-iu0-yvV$JJ2hIC z;eH|21}zzlHpnS*CbFWP(C~@&um{P}{CVUfDXX&5X~v2$#fKj&hVh3rhvPIfG*mSBHRda*HFnDx$`8zwhimiq z!?I-^((TO5*lDcjtb`i6#OPF=mrqFbLZN2-g`T_UH?r!c< zd(JD#gB}I4gH=N~BSfn#!_Yi$txJAn12wklTZjgT1|;(+8ACp%dNxgL?VJzTj>gk^ z(%R&p{Vk6=O8QF%w@Y54;TudmcI(z!VqM~2B3L?oY25O(MWlrgC!fsuRkv4l(Shn>BYCi7f;3w?Z0GtR5#l;&iTz{wH#tKz6?s> zj{g?l%RX)HK3Wdxo?@JOVO-g}UaelmHN@M_JLy?*jJL~t^R=kYtFQVMt@6QijKYM0 zFYY>xH)qg9;DSrYo~G$_m*ERW+Yz9*(Jvg_Ok`DH=Gm z#Oild5=<6VSXF=xOl;yT59_t-rwu%)JW{~#z@g_%Xtly=E$o2|fl^^2;*|zr^I~ng zG9ROIqQ-IuSCZIP*B2S>W|pV;{T_WW32+n{d6Un~P825PD;3%`{jU3+JXNsxXt*b5 zr?JxNM}iCuaj^ZqV~^AHlDk9s#qy_&VDTqmUUN$ew6^({E#txq zpE~9f=vo|-V23F^DHQde>lbt?bhKwUXI5rNJ{Ol_jeT0DS&!mJN!3fO;ScnjId(r@ zI^EnopX_U8X|sug&Obgd5HY9>t=Y0!>^p>hhvGG1O5hwlPNV8)ji68?aAj%dmbhEa zw~W{JE~;m|$Bl5Em=T8`_w4mrS8|UPX7jTz!q0^#p6v_c3qt~DFv>8yF!M3iFb;2} z-kZ52^yW>78lGK@$v6D%JU2dr`nF#3$=i~_A>mpSZY-Vj&&fxgVv{{_p31YUO1H|$ z8rXjfdQDWuu;g3-UN0Yme#38T3o9nQd}Qr7RZu zZiPCFMwzZ&5?fLye(Led;ec`c-pu!z5(>l$lDU$(lZqa6uVu8@KC$(%6;f|>QlCn` zijtA$j8Eb@-oBGee@1^ggt5}GQa32DZE&P~Hgiko+v^Z=MAZ_NZKb=^T1@M=kAqc$ z$c;`+in=|M7E>!2@})>0FcY~*RY&ee!p8B&6U(3rUl-QfSNf!Tr=!GsrTTatjKnQT zXl5tKG9^tgJH*Dcb7vpRXtEWGlhZ?s1n)%M!8*RNz3rv4l6EHJRs~8+E1lig%Un6V zxsRFk%EaVpj~eg1yu8|gEZa`JUwls)qe)MylMAQGK`d8$QV3U%Lm`YgX_AY@bEfxr z_?T<=@_{pxj{2Fo&=0?FuZ(ZPm_A=>AGvVQOwjy@%!i(W8{P04BuYP8eLDT_6f6*f zDJ2_M6B`o;R-;t2KM5QVP&(^xWOp!|WGyyIjKzr2J{ z?1>)Z#4gUpOZgx36-;xQt9k2romIE12ra?YOlF#Evg@gNRf(1?lNWY@j^b^-)yYgF zGb1B)FI<@Ng)E{uwCjePQ!lv|V6QdeCYU_ldvqT7)pyp*dP!a`b~dzwdrrqsExaqv zomvpT`3`btT8nAR)>k)V+|T-OK0D^$6Y3R;=0$Qm873SX$bdsauHziwZaDYuB12T% zBhgTLx~(*9+9qYtbN47$CMXzp|0@wbD=8{*6;I!~P+FMQvMbB*iO3Tv41qS@)J$Tw5v zDZ(h`Ye?}pKMwUH0Y8CFQ%*6|7kbp4g{>s#36Nb*4b6?%9a*ntB*A0{KTanYT`k==g1!? zu;u+}A#10Lte97a*q7zK%ds8{Vs zDg??h-*t>Ej(AIjoekfIh-a4TJ}SaDH&?Is9~*YD?udY8$w1qwYG4-92RaDDU&NfW zV4R30L@IS|+i$QnN96hLqo-|d3M(t#iQlv~*JO8{zyEn-%iHV*LN1<9_>YT=hS2kK zia9cZI&8rq_I+(di7Z@beuc1cX2{1eBle zXaK+AUlG7J(E8_Zl(QTc!nUPC@d)n{8luuH!`wz zFtu@fxqnL*xPf6Sq3(cyKu81sMwC>b-bO$`ax_y?b5xUl!EIn;#r)FHM&F3p)yfv$ z4+5_%H*jfXScu6?U4TAUkQs2hOk&luRKG3hPYdMWv&3=z$?eKG2 zzyw*~Pgou^KVtcxnmL*o|C?s;C)drc=5;+BFT68uWd|dBVH+zeBWp+g-z(00HPYW6 z{+9E)p`4kkk)@i58PL)Jm=iziBTmkr&Hnl5e{8D$yD8Vxr+>EmQv5|8no2?<*KN*jPHjUstv^bL40J+2kL$f41Uff$QZDJzX2Y)m^{}_%V4| zepwVhCY?WZ3Ic*4f~1I$k}Kl+G-{I4$m!9JxS)B*bXFZn^bL$pf+FW%pn~#@xJr+< z^KU!1s3MY-Jes9TPnQUkkO*A(GI)P_`V7WpTkP_Be0+R5e)?^nTm82hx6PU@540Fq z5=_gkSvkND0SO)C^S}MLPai=~ttpy;D~N!I^1uCPLdPA0iT*clf&x&{*8{g(KA8O% zGQfK!`@dfQZw3B;Sph@eU(O=&d^Yj^xlwNcz2rt^m1=>Ou>0PoMX?97A z<8qQk;`{2Nb(=>K%9lY1808CV;UOM+m*5oI*4Nf57Tnm+_(V*|JJtsGxML?G%P+W^ zx7NPz-U={!aMpliNAVfo*MqL)RuWBPu{RUd2DAmo(Myb&qkacUVulV&T#itE3BVMnHzUT^mTk6BK(POs3fnYZ8deaKw>{F_!nKIQeR zf*)_HQ4x@E)HHJp&cCKkOr)ZXi+-Ta*>TQ|`JhJOvl3TFJlnUmm!}7tkzfyJw`tha z`{6gY=3JY9>qOki4%}je2N}m zq_IVX8|zz*GxIjMSRcgbM#rpi_%w#3PHo9Hgb~!|HBZ7xAney z`G9h7{O*_88Fhdw!gmCE9c;{DIU`92~Tt90|H zF4AGPpL_Ox2C+x%msM@kMs!SyRo?#v1OFkJ5Htm1I8G=`@ZoPI|NEGZ=6sVVBOIsFHn>HzEPVf2#fzj^n={RkTN1fjnq2HinrP$_zuKtQDX z=uV<|;=dRF4JIIx{LcFt$?Jqkfv!Kp1v;GEuKv^NaBm}-$fQOZw1u^Q$|wq7ON6Il zYDnW{u;};iry|)@nLFN5eVgch@~4;q9|14N1wUfH`M3E4decQZJ=}=p!00lB3209F z9jf5!Hufd)Z{%Yhz?CBgQp|o)gy3zhnkE2g)oXk>OEHgm1RJ)24^H{Ns%Vg+HKWNY)HGO4UNN6chS2F?pn^iEE}?yiQ@M0R-2Rc$$gwCxNl^9 zqv*mC&A7u>mB_?#CKa8>y5cHRWhi&WM=i&-gf0?jn{>0aXm*qN#Cw;&E)@NqB(4M+ zps^L7Zkm58bz6LP`vY>RYcr!duXilq2AliQM}DueK0l;!t_cEES7*92s1t4gHz-_m zU*yxn_SY2{rC!>8mbPHdLfJhHt4&_FEE#`eg&Z%%T)Es@h{4!(`1on1xkR9nRCk_U zkzamw?TH`|62`+=ty{}=ekW~O8HUMOj^jZJ*`KrP8=g{D3N};rPBX&8@Ct+N_oK(- z<@X2n?TqdWgg-26e!pL(Dc`d4=L`iMch{hK+od?c@f37QN5x`(Y>u>k2l`ax0 z2nj3fsjoDf6xtL1nLhZI-z-q8ZIY?-{;_~j_a&*uUb6m1kF;Pa`zYtlo4{I&A+?2* zy+bXUz4%fyWVDe$V9ivllV2n`1pT-YYCWm`I}HJr^Ae@>$mOp1zaW*M28v_K>Q07u zdx*fd8fC>iOVnH#ak4IiV7M<%px27xAt0jQQ;da>4yK;s##T=iCSAsxFliUmlbo*j zp&X;u!B1Li4}@oe|GM8?4GOHjA|?+bhnk@n`!I zI5ab-?AxcrFROHMlT`}!7l{V}h>0*G!umaNi9|rf<;Qlv!TzUYl9-k=V;S{p1gGLZSrXH7G>spCx5eBN7p_)+4GtI$*F=ey7g$b_yaJq?4Xvo82v6Mz})c zsneRlWUa-`=UKr^olBu>hTDNQ&!Y$n7mQWiFRG88$!9hf{*g#TumY}1zx&5MrOMhu-4F;HK!2RPlXc$ZY^0zS)EGJZ7J^R*|KC7V%rqFC9=lSiL=D@ zThIHpFgKsn6*)zB+D`rfLq~+5lC8>3lzsi%;`loi+_x+M^vwub4fo(%}X-Ns*g(9;k_MJG+bJsWiS%@jk8W zDK%`?wYR<0skwZYlV!T2fCs*uBK>19dc0X1B8BwUPyd{dD|!-_g9f~~z+R1`*Hyom zgn6esNa7D4Ht7=pp=O&>uIi-&BsK9o)2@u9XC`zF(jIu}A%9ziamSwh&oKB#22pHX z9OeM~39Z{wY2w8$J$81en8!}MXxHwCYlY--xX#|3GtE<#s9CIB_knkNxh#^+ zZ6kbg2`Kj87C18wcNfFs>>AHgNdaZ?FGIx19na$@N;>w<@)D!|!0tCFfI+<5zEgvC zZ4rLda-W{}fJRTxbn^~U8i%dx?{ z(NTZ+;+I!9SsmL??U(z#%-p(?bBm+d{u~Oy_o3FMujhTFO@GaQCQP7d6hfBwG{XGu zlMOm+lUsOLV)9ppi9}W6Hm`B-Y%a0X8%nM?=fq?aSI6y$@nR8Arz#^|=&YHXQN4X;^n=a4QBd+1oft7;B$%6Ym>vV4~+0no?4n?j_S_9qfHTYC;Q8wLXF;@0OWECC01kteCA4Pz)UNQW! z?C4g2hCiHpnv(rv70}TwCrjn|y)FrcB7vy9cpP^cD41`G1ecW$$%%wLs&|?>doGoG zPW9O0wDEt1D(pf++KHf-7HZ_U~ zmDS^<1@TIp1yb8vB4NKuN+AAr!z+hltjDKXleQDUYnY@I=&022z?t63H zU5+auj%fPlK5QyvohUDjA#NQ+l1)~fIO&v=vYRX!4hd;CUVFw>fA6ROV7E`9arpekEl0Mizj>?Uga7G}hSAp~ z0h6U@Dc^H?JO|k+^k16vLH7mEApO%^cu~AA8d(;KSE9N{Ni-^AENrSr=FwHIjDl4G zWb%U9tz6S)D4>S~()mR9Rs@@gjEP}X^`D_mhyYYVp`^ng{`bTR{fVzxlz7*|PUCxH zNZyd#W_6WA^)%9kDDYbPvoyn%AA>cTEDpu_u}qPdr%O>Et8J`r|1ug7H?Z7)K-@17 z_>8_ja~vK*!S`jIUM0P}yz$bGMf}qhmjY5kSsk`vu-fu!XE!+r=_ob5=$11h-uTrP z&|(_kk;t8rM*mRa9OI*lLp=MVenf|xRcA1r4hj;A-YVfqU>$rv;+g^or7)l8zNFI@ zejx;y5IMnNkM!?YDG~>u$Flp(6w|NOzRCauZzJ7=^+a&7<-D(#)+mph*iWFgqfWe9 z+-K6WK+++#)E#d$85^3OT>jt>kwg-J4kbL&r*Qt!VGybesld|n=1YGr7S4vW@`eg< z(Aq2tT&unz_zyH2f=q37yS_fAqOsdA4Y9bT`n!r01fe6T`^nKM|CC!W0R7g9vsS-Hx94ObB-p>_gwiQVHc2Z7xijTW$G^`%sC;K|4GD9 zAcc$_`nE9t-MW0+iPWM>lxRcGJElxG+#wQC7Mh|L(hhJ+L=Xkjl)=kl*b9zCMqfUn zf0nZTgAmhz6Jmw3DW`wP4>zwzNZfgfQU|15>pl_VO#)ZI@kJ(0tkB(lTDq!(B1)_a zMU^wwoMTPmT{=z6Cja2zEP4HD_M&4i-0rS{23;NqpMlH%AO0#6uTl!QD-*rvy&zJr z|6${BpKMV*U8!bPH~Qffx}YcvUh=YNl~l zo(SzS*5LmV`r5PvPzY=MP6DylpNO++Kye;n!`pmOOB2bpSE!reSi)ZFdmH9>Hpyud z@io9G9eU?id*62Wq`CScd43ObJ!xURxh(dxn7q;3=WGHEbAJV-;h<8R z*gD}~miFiXP#WEfr>9XgT`|73FPc{Gmry)<&hUq^z~w`C@->oLN}&Drjp&XAipj^S_$$r? zJyTooviphe`S?)g^fXRlV~Mm!;qa9c;7TB3P4qdObMZmf^zy&(JZx6|6!huqrf?}yjAD)6$#^WnTX~zGk zQdSf~yi-xrv=sWbsL3m1Ry!o&3YitCOYF{xRmEb05zh_8!>f}Mxg3l;C{zW1)QJs# zYbA<2LV7iQPzDdwnWD2tI~j0CgqXw~Cp~rUR&qJ&7m8=~ias8;>hDH5w$PadF~woF z*8pMXiv-Qp;J<%-V76Udz!+b^Tr#Cs&VKgsN!<0Kpva}UzGY3PD&Au;ds4&gHj*c; zC*bp0=@op;=Xyi0IauwwCDco8D<_ZlNwgpCYo+?V3V!bC`LZMFm0TKV7vLhy+ z7;L2^mpbl{sz0@PO*&Qu@m#a72~y5lsdysQLJL9LIO$OCPJP{h@rmNff_)zlnhsU^ z*PH@;sK_UzsxV+Y5H39u$!IY=%SfCAR7)=#a|$=jGolAen41**`BAUn%m9_J zJDC{@=8}_1owf_9+be=)AqpwD3jKCLfpC+LTy zU984zzN#%MvjfN#eQSYWO;ob!EmDr zkac53@ubBA)5AjIvR{a%g;cVpRsc>CVm5EooFuT=M{l5YFYR{LWLdFP(UjX@j9-&i zt-ZtJUp{-iVj;IdSl(hC4%%>!VMlo$&0s-nf3_^Y8}})zOXXJRYt(9jR+j0UeX`lr zTx_6?L`BnzB3;%c4_{Aevxa0=Ud6)ZX|2S-_bf$iIyyLRt=yMuju}u9fY(ZcW2xP# z2AAOU`iIq*2};GvEPM%iMM8Q4(;sVa|L*02VwjfBJ-G#0O7ZQt77R0)AaHN^B9!Jh z{@j`ENo}^t+u^1QZL-?2!;Y)WodM7*jYK0PwIXrod-kN3(0S1TH&Dd`ObmK?-RVnu zwT~Y?FGm8iYbY8ogC4F)pzTVAADBE354Oen`zj!!uqx;s_IfYO`hv4DxK+uo?coGv zkLBg=LXU~H7V_y8*!k|^J@vl$tSQ&nB0P<;nuEiqDumz*LN|54@S?zSICnAfQalBM zh~Z=`mQeNJqVnyr#;E|N-yU;yg-2V`jMqf3`?kp|fKSI74H1PI12_jSZryNfT(k3y zWHFG!tEn|ua3_*zXvapY@mrV9HTt6n&u+xkO^&Q44NGfISr(VUfhHis=f*xqwQi;w z#?aXl-E~?z*%b7XLLl^VKPLJWbFZUlyQy#J8gp1TqjI*NmYdONh&47O{25TU9Cm$n z+g*UAddCV}90^z2KvQU64FP%iM1_SBkmIphO|VN!NZ6i>4tB7Qbac$Iq-~7_VBi~e zM$@x8ua9jvegIAR#&~)66_MF5Y&V0ne6I_$B&38QcOXGDRxDf7tXoPiTAS)&g*)Xa z5})op)2holmK$ST%^E=TU{qiI=`Tnqw^dn)&etdF9wKeNzAm8O82c8+4SpA$Fg?fN zKN2i8>Y`QEvU@d22XNMrRN~n53Yl-p%}1W|pRTiwe=-nfZ$!FhzXj_}jw)_Ac@Bt4 zWoKtMSn^W1W{_M!EaWaRhbh65ZWoKtc;gi0o*=Rw$ zmdmu0LWQ|#<|rMqpD-poA}f&9b#}kJ(eR& z(Gf)#evWOfiB0|j+{B)HII+uDHOk|-JZq!Kg^ftBT^U(eVzt&P7*+3!dgb>%nB6B1 ze#_!gtqdFsGibG_OD(mWs?JZ~&cG-gZ?kotDP$@@{7@{eK#^cnS}@!W6O zq_>-X4op=}eg$KzsMr+TdbfVG5x#j4yw!NAw z3+?Zy!s>~LoW5QxbVP|FPgj_aL@MX2XghZU#=|TgPR=pOaa2rozE!)%%;kBsZPa#B zCL|u%cz!Txy)nQIuzbsXD>~tltMv!&B^L9wPHT}9ts4l zp~f&$^VNrPAK&l}%vbn~NQ=T9`~ikizPhS)!-n-xCe-^vXQ?lRuzbnr&yBUed`|EU zx|;9#d#xJbOi^F6*A7a~^WQ?Vv})|L^{0!PCu1I}cD}|U>8=DeSvrJq_^9h#wqk*) z@IS&zBP>QD4rsi*I5lbwA~0?YV}#5eb3nAHUSSfj`g&n`ULH)^WE*^gdS7l-kXy{B z@Oiud_rBZLFo|JMenIFnHl`L8tD9OyurQkD700aoBF`(I{M^|Xt-k=9PA2J*(D_!w zsZ5INcMRAEFQ&sSD3qU>mR4+!Z^(YB`<{o#bE2dzr=A3kEZ3dy{Q3P{i}dJ64{@U& zhJ%3ik|Og}WL73L$O#?Fr#x;~s6g)!xklD=Cku-$}Q>a?a(wpIbrVauJ8ie+4Jd$5Lh z`!0uhG+$$lOz6+Lrpo*{-bH zeCU%G$qApDRR_!v+0%-R+9U4MW3SWE(k9eQ?Vq{tug2?n*(cxG8;g$4N%fjo^GJ^5 zeoBa#Bgia{Ry^NE$yOrp+Dpk z6&DOIjPB9czFNCHEvj2_#vjv7t$rTM6i>%dY>m6U6cHpao2m4-Js4nVpCbk0oby0j zGDF7>+iFCE34e64G>!$XzOj;alEFyvVMB*DfIW(Z!1eCFNT`NQK8U4`D}&S|8GXI# zP=`J~la!pQv<4OXjS5KTaIWIwT5)5n^Tq^?dhw>|WM$zrm)>}(5v$9lR%)Ba=Goqm ztY{g&>B>Y!KFoR)4_y*58I40KHrOCJOw?3QfO^ghC}2cle6l}`4xDJLa@3A`?&82B z`5AW2z()DuJT7f>cBJ&%${=Fjh|oAU0sM? zm!y9>t3Q)_C#mVfU%(8{&9Cy8sHXc>OPB_N28~p#r?RTHQ6Hax_0-ex&u(SW49cC*p^bU3qd7E>8p$YH z>twcdGBd4@>Sfu(`Ko=6UM1OPuHWC%MHcQy$?|^+A21<(ZZuJDmfiBL)+sJLLcPK~ zW?38>DCB=5{_qIML}W{kfjosR9!JMxgM$lvQa))nVP>@Ry>+nB+j~HU_QWnWod2ZV zlE-x$x*}L`e&R9cARb0a&z;eXnR9^mM^Z-eVE}Q$@NnhJ>(70T=$|gd3fwmpR$l_d>>GLy2Kmdi?z( zAk>FLUjX1<2i~m>s5vSXvGI9M*E-d+E`QXOCbL1^557aMFGhU1<$cLU<;sCjAd2iu z96`Zq_y*$cI1i*FivpL&;bg<%Z{NOK>PhTMcA25)Fdr@%>aakVW>d?Btv$FTvH3iJ zy=u}GE4>OSW=YU*u$%vuZnjWiI;-q|n($6ge2z}l5v88o0(HNFsZgUL64^a%#!M%2h1)i zPdQ&BC-7^P^Gu1~3HP%G=|oQB=sM%HewewDQhyj}f>jfw9gJmpxH8m+y}A}nm~bw% z#;9FeUZV>}a~l3^lulAH9~4$l<#%G#8U1+c0tf_j+I7w`Vxh$G4*L`iu-=%Q6!%V@ zL#_UGoyKo`6*}AVcnP+jvW2*w$VX-UjN%IRE z$VmBj;`QNl`Zg!ST^A^r?(%%g+Xx7EtTr>e1E5A_9nT$6u2rKjbFvRxr1b^#Z5&w* zVT_{#+X z+~6rSc`s6A$5P@iN4@(Ccv54^W;L)NI@idi4!=7|ORsP78*1-+_lpfkPi)5s{;``7cIimwSAj2Sdw_e6kCW=tnMdQd*R z1~R?6d*C9fHK)u5XZyFo!E9DTmbl!k)0zW^&v;Ty2V{h{>JL-gT*Si3tyZV1?J`~@ zuuJBt7F&neHMEV^O_mx-?WU~oq-`}A*Bz~w^%)+lk6Tl%?{nAoXbyyNCe(jnn0PDb zje}1o6AfTa{0m~@_mJp|lNnmoz2vRQs$#AeIduKerzexcW}J?-Xih_xNC`bc!OoFK zNAux5_L*l?v`hL2u>h+h(#d2;0ndcn(MiX=A4@4&dVYv7!@t_qA5;W!7Wsf{kkJk% zx_s4u(h6O4^*WscJwtWhb64FP`nZCH=N>q9b*e*<9sP`Ctnn!lhr*URvODqeQ1}fN zj-$NXx`#?1_58fuq1+Q{{?0QBqZV@V;xO|8P-*tc=<#~8sDC2y0tBy1gT217Ds5Sc zrJhQyYNDu*T1`~k!~%wqe?(kSoJ?78C<2Q=^mJAs^F0}Ld4aqv39hspw;#`9e^5v! z?zm6`^TrV<1|76E@q#D36wI+)8*j_+65nwoxS?ZoQ}gq@yt zaNllc%tI6I0kz2rtWleZ3Bq2eig>XBfV8kS31CkKgo3))SSPE!T5=CKBZi_W5d~*@ zW^aD|Mi6Zo$J$NCJFDAf?X4-Q2i2tyyH46bn82yBT>6u@8F5Z`_`%k|(J9QuxUF#b`|sq4h0?=LH`Q%6CMtT$32L?3P^d?Cnsz_is`Vx8 z>x*Zw6=6TQ=5?AdNO_NJ5FW-hdCltO0y{ZhWI*2Qt@5X6hiv)+>*VBE|Da|CK~$)< zjw||NZEL(xNdoC+YXa%Djr^y`J0zTzwmeFTVQWeA8bH=&xP<7!1BxfWGSm?Dk0wSq zRA#@~-17u#N%8$2Zk3CcM!&}p$WEcPhNVV+UvlK76{M!mDRVc0!=if+ZFFk0_VVjn z{Pc{hHAt}yT14UQp2wEXVIxAPm)Y0A-S>jbx(+=Y>FRgNO<~B%e$8de4i#d%77@n- z;uQP5)WV}ts{yn5kAd8#!i6V?w}g|HmR2Je9s=Ju52K4o7Ljq2y;dgXw0zLu)p8PM zstG25ZE2;}>?ZFyrxZhLtLDcEiIH4pAgzWvLpE^kJR$`srPiOsb***|N3-Z^x$7iY zz_%vu)97W*<2W*0Fy42HWB=C!zzQ^JE7KgfUO;J!FOkA(6m;)#Q4AI_y974rQ@z3M zwFj|y&66DcW z%R%Dj(23l4K3W_06oSxT>?C)Z%;Ta_*FCj&(3yn_^H_fPe$y?b*h$*E(c{p12pe4G zSc~!Sp-+m5w7^_}aB7`0kVP0XWuHur?}$!5(rZFUb>b>c9fijB-TcIshD7t6cG`_T zjVPTJsNb4|;pgd$gR?(m!$bA}qn=R{mLr1~(&@mNirv;fQfNG?wB6ShxWf!UEpSIV z(Kc^Hr03>Zn9dAJ*va* zdx5%wH?}qQDPwPQdeRYl;N3JglV1ay%0Q)NmIb6}R9~)fvciH6hi^|VsQ3e!*0+#* zRJxu!uU2N$lT{TI22aQ}A>6LpX)AKOi*dT0K(@7ENjQ43(2zVNl(}@cpx(IKKr55< z;AE!?<>YX7%L$;*;I*wMN*F1>DRCm5O~bYsI}@BV^aDjafwsy_iif+VerK$Rqb^C+ z;OhN*xeK)XipuS>X({Hnmb9s%<8^H0w%v?IV>9#Fnh64|>j2(?sxG*qVs%$DyncUF zlSZ{j>%fkia)ROU_9 z(98XE@B4~^l*4reLf{n{K_wp6KGzc1L%Px@aQRtti2aQv0of;bJ}hbOzevnj9+bTz zYc?Q%a+8ReJ%?*3>ol*;jSQAF+tAyw3E&jgguQE&mWSxiaB9l zTU*B!m6mN3%brjx!!dZv{>Je}D7}{L#VA0Cdbz=pT@=)i1Fw(Tz>82IlM=(w;{T*0 zRuQoF*9n7mj6Lt195}n$aUa4-9PqPZy}(#r_9UyZA&}15j91W9^yEQ4&c!g)*yn*! z$x8F}>g96)-tM9R?R8vCpUrn^)5#BA_u;k{l<)Y=Sy?K!vrLU4yw0w5u}@Xrzq^flDi3DzraoLNM^lLhLJ4p%@tel}N%GWI&++MC z0aC&T|G%9>n!67iZs1M9FZ>r4X#|Pjp-QxIPr^9N8%PBYN-JyuN6-u&tg_Md;L5rQ zFuA3$jZwWlU`uAX3=>PFNWJVE|I#-Sn>%C#MWW&*nr3qw$h?G(A~hU?+Wu)k^w@t& z306Jan51Jf>D^mP2mGl#d`Pe~+tei%2T3z#N6EwXCEfO~KyYUdpc{_WezpTTD3HV?|w%$KK(Bcd1cDfA|rc)gxBg#z! z8!z{{xR2Jy%UI0@sRslr%M5|q-2GtP%w|vqMc{1Pzuub3blYKj zwmID)ZLLa~J*J_oFu_U>$Qwtkt8B2IA1n{F%p3rG$fS^zhXFTB2r);eV|u^aj7j(V zx&2k{=NkScHjU?%^}0h99uI9k@4Wt)OUU89nwuj66NA(&)W&^R7>L<*W6n4OLsT1~^fljLcE@uqD(HDXe^VJ@ z{QE>lrr4}L(a~^H2iIR3_=zVXu!88v{ zX2uIJ(*t;0DnWa|&P^vW4)1IMt#E9iCGx~QA%O%YA#q6yIbMKnEVY{#E7RtYRa=~} zD2@Tb>WGHZDYJTM=BMSUnle60EyZ~p63*^n^&A;8dbN_Y0^LR($2+w$-g*Qk#4Z+z zjZk7ifZQmZoSW$QLfo-cNfn^t7;x%?i2NeB967jw=j0-H#}D;pcl+rF;lPADd&E9f zGZZ#e+rV(~cpDRV!v2#dq!r9y2;d-$QgO^NKupsetr1-tbO!7JYSQ!E4hRB4#^pc| z66kh2DNO0eYVrQjK?kHFUXQJ&D=gwn>5$I(miacIjkv4} z=j5C=Zva$goR2G{#ow+CBaE7Ydx+EMPlW4D#eCbyJzf;7(6Uz-Q4ca^60oa z>)4X3=BqFr@3c`!-@w?wb=X^$AHaTmvcm|_GjW7W8e@qZ@8Oi{F@S+RHmg7ldi(sT zl&9zCmGTcm;>%mix#>el%XP(#XyN1+SsMv=LQIXVEBcN11jKsWBONcbY(^ z2W${XRKi6L3y*p*w!44AWyeL0|9*wUg{<^Qu8s zgY2f-8Y!&mMhk~p9vK}F7id*=+_D_1)JH)DVdW6F$qHnDXKRU~kG|OlkQbO%6Xox2 z(>&L54w}hvM(U@>T*Z&3JjiY3G@WSo$w){nwtm3sD{&giw}Aq&BTdMk`KIj}-^#M1^uyw706;acw^= zcgw9vtA^Jl5pQ!s-55wY(h(ip{B3uXDvO^eYS(c3lEi(zn*gVGk&h_xA-S+@eUROExH+S*z=cg8()Wy*(f?UWx z75HjnJy;`Mr)Ns2)K&CY(-k&#cauKexL0tf4Etoz_F~m*BO;CJdp+tYIJwwiFqO+e ztX%P#J)|OpUBU)c5LMt`yhCD}M4cpVTHBa4BcC0$*kbX|U&WAk< zauFiA``k^~B{uqYRjbNO`iQGCSIa9koh<5S{ANe(I>4Iyrm+~h&6^9WTrQhQk{reH zSmn|Q<}<5f>9Ey{dXK{(XNhf?o0|IV|Hs&O2U6X)|Cdoz8fM9=Y@)KahJ>VO7@@MW zv)4fzWshu`$=-WZ3X#pRN%lJSIu5_pd4gY|vAoyHzl9KI%fcwuECi1IqZS6h2F&6s{+D&MS_Fw%H>Gk2oT zAxV9+H)(QhSp!qGwj5D^RF4IM#(bB>5ktle8=I%!nsz|Pj>Gj1&h8TXM!G$TKIe8% zhj_KkJ^BY5A{%=*BaTJCQ^c*PCA(ylRkB!x#iP**_dRy$@|8H=my^ASN@%IN70SCK zIcEORHB0RxUBXK;GGiisqsx5;SZw) zr?!Utnmg)*Xm-U%%?NRU1vQ0j#Ku1y2U2v~hoC2CX$tmk8Ro;|lAWKT(iPeMp8IbH z&qiA19~~of_%AQ?@4q(Pa5;n>`~2nO-(UR0fw}e*d)u9~&29O`dDf0am;;KDhTGGb zU-84Qn`dk4-Y?+((ROKn`ClK{_IW^L0WIy4Q|kZMhtJ@t_;%Er{5Y=9@3~gA)_m!y z{7B`Gkh3`7rTq8syv+gD&Qlk`vGtLIb9lHu)Vu5q`L6xZ^nbYYU;DSUy0K+I&1mim zx7FRko@y9__YlT*@V3FAy<-aW;`GnYb-L=7YJjFa7+!(%%^t88MJpcM@8BhhV@*iD)8_a+I1QT|}EeMRb|My3J zFVPAT#hbG4|2ymXw}CU@U=#^uLbv{ZK3s0<%88<{{yE}wCh~Xj_Qy3tM4{iZ@QJ8|$K)4R`nieg&K*7>PUL@C6pO5l4H~1L@S65^Cu>^c z9fca&afH}1s5KP7^-Q7{E)?yi&AIrlxN>jBz5lvuCU{7L6H567#ulmi{m%6fV#6_A zdi4<}>7CazWXd)g&L5D0SRqck+P6+g`Qi>&B60=_X9Dr-+c*_}7;UuDorypa-+cRm zQGqS2rYIBzg?!&WjSzQI1sDn^z}rssCB9{pWD#3zQBuq`Qeo39Enb*Fb4%tZlHd^u z!Zbwq;Jf~9ixAZNox$ZtaiE{_9HsPqrtg*dTW@IwIB%?0ZWwjvU!&x@%}U06*;}C3 z_B^$K$)vRwbw37hI+mE;9%Bdl)XQOl*RsQfN3bwQpNODgRiXaf6SE{~*=`)Sr!K#*G#57g@nZrzNaCif)T_dTvP74=ErR4vM`4>fDN{bML-Xj+7u9rt z|5zMw!Y<`lSNk%_X#>Oz!AGn@Vjb(aI;WUtqCQ^$pg$&urOBm?4X*V?wB{VZv_hbE z+|9OoKaDs1l`Pa^z9c~1D7u4UVM9rAKbgZA4dB&p zU|?!hx1NLonp7VdpGvW9#yS8Id^v7$S-gbIr}Gu#;v{#{3z9J;DETKc`X9UCO3@%` z>c=dv4RLw=02E@YAU|SFD_QE)p%Mms>b}FLB-fTP+|AFSMlMUD!fZ%qSCu@hn8TuV5{I*JNk!iOv6lV?A1l1Reqcinf1g{#;TA6=2*HCO{u?o1mo`-7@BcoL- zfH`#InRgjE4V|9{=&=LEgo`O!VUS?R7Xyy!L(Rjrx<@${?*Rg) zmC38G`_SM2fW&%33+6}*+Vd#X#;uda0k=d(Tare*ll3)n)_5cEQ??EV10&%sCy^@) z&GE+1yp8Bz{>KUOQO&tuaJn{J?<4{X2JN0w4Cq9CRJO6CI+pal2 zj)y$z8;vp2Lq5`Ur+EmUTY`2#b*U2;fuqGw9ThO~hyT!V!??Ftg$}iHH!}SA zBKS856W^=|v^Z-^o!eU3|A_FjXM^zd=&;?~A-!o8H1MQ?V!_Mkk;&Rw$L zd&Fuww>c+zCvHEj@T-Mc@I!%6#ST5sn}dR`!sLe>t#VqM?z?Ga%e@Xu`pW|!J&FjQ zO+6B&#A~JWy5Dn?kExVWUM%gzWR^~iF2)pqHJ8&9JDF?f= zq+jU)&ch?isfC$RV zVHduykwt6Eft-0j-hJ-X(GfM<)jJiQN;q{PYNt8LC$*vFKIdg>Z}bb|I-{M)l~P}= z)^Ruq9T~wE`Gg{9fp;Fs5=gqMaE6} z1b0!px%P>Oz0@qIG?V8SKqn7*NI?6z+R2_zV z^2oQk8~;b(Nc5^4N|-(4vh3l-U1!B_TDIh8F;dJ}(~%_)!H7qa>;0>31hcOvDw=%h z1#nbXVNRy)>^;M4(1Bq&sg|3Kb~>qlamPp&_;j4kGeiCzdgi)vW+paVBcB80EQKEp zx?6uX)5rouV|DnwrJ$2)ZBzLqNBQ$pvV1r2RGKz@|L<%Dk1c9!E}V&VCEhpS4FgZA>F&4L)sZqU z04hAMpjmGqwd5wY}!aUaF;{W)~-4uO~T%=)$R~ zj;w@Hmbi~<&z||Wy_+?F3?!0YqHZhBWQZm#v^FxHPWxYp?c<1Y!1I1Q@*Lf=Shs+w zqnxOkfa2iC+Q!tK!!I~rH&N0$rNzwou}8#Af(Hv}d99I0#eTi2Hx=E;U^mtjH)ULR z2n%N@J#Y8DyMViNeRB~olJws8Zx|O5MIF;hppE_8LHYG>P~;hTGYO~L-q>%Kw_3LT zvCc!~>_~;OC0Rwu@_NCpC@Xc3x8WRbic(Vd6g!n;nPm+g_(=tRUGX2N-^*)4$ z&{(^2jr<3gYN~(%rhS1dpBeeIY5aqa_zdp&C!)>oRvq(t#KRZM7n7c4|SCrJjqp; zrh5!a@fW=SyjF0d72wfhKz8Wjzxn<=pIG3Q zn$Pj`cL4o+#(dxcR@ZWRNn|?zgGL@D^z)C^`OiT^%lhrGYTme9jSXwg$4G3$UcOQ1LI^;$MEbU+)W{~eis@*0l?sNjAfhi^zKN+87nk@6cUQU z3&vAD8t}Vf@$Y*qSxCGv-dZ!IiU0)2Ih7lXLsM%*_-c&1V!Xu1bLrj$z}?59cEdi0 zspDyEzsnh&&+XENm5(7zYo92k)U69+1By)UfmzCS^daocg8uO^0l;G#wvH^Psuwv< zqr*E5%7p^Tp{E-@DMWKH1WfMyd_e>Ie<%3gZ*X7o21}%%9|&1VN98ChhDyWsorVn; zr{?tM(Fcixb>V9Fw6s!gr1iCpTBeDJ@qenU>{44Nw%wg|()v7JaRYSiiY`tSp91)W zLl64g<_2!6M;P}GplC*EYI}vH9%dmO#e#9GRU{69K*a%(jRa3#ur{7FC9n1Ag%4y8 z4&hH`kQ#+}y?fyQQB-1pu1o+m-~B2oO~hqLr$kg|w8fGHR>zyjq{8a>hvW{$F)oQA*j(K8o=F@O2=mkyf@*xVn<$16lX#aH#1ZJ`^@3SvljqQKKhCr`!2RB>fwNj&SCkcX&l)uDZGn`^l+C zmlHop|!U~}i z{%is7mg{}Zg~-S@R{(kHvAvQ3{l7ZoyKGVOJqhuH^^ux%&gXz1KDh2 z%M3N6IAr5OC`g8x_qvQ!LlZ3s;+7idERNi|Q>GXv+XU>)Ji8IT6=TBk9F*Z;As3ur zRp)eB$Or&4e;vX*lZP{HR=jeq&_nUkYrIpcYA^Q=BYHD(1S%l00KPxtuw>*Av}*y9 z9K*DUb8rh@XkIk!$oyzrltwY7{W^R<<*pWHm2#45{|AReEXCO!*A#5dugA z9=n-Nn~`Gr)ob%^+*g$c2KE++6EXo+b7#r^pYB((pAZ36QeQ$hw)DyZ3i-mL(lw-{ zgP`&VM))M@p%*pf&zNVhXT750g56kL(B(UwM~u*xGt}!PkY`9q$3v>B!t0$P6*Lc+ zd+n0K^U{NcyIi0>y_!kB87CWXrg^s@2hl~fIRIfO_NvWSR(|{_#2q~N0J^Av!>F7@ z>u5@)y*@%}QnGl{FB%2dA(Q5P$|IBzENGu^Sio*r_crJyD{+ZJg)E+@95RfQ@!Gt$ z)pji}82}bA0+TPk`3cI*xkH zB1Kip!Q%KdH1Co<%8isM7O4d7oNJ-vYqVSi!d(32;0urD06f7*OBH5ye{CHY)*iaX z4ND2J`jwt=CPEYOvjL@WPJ~~7H;J?Gp4dUvMoXjg4N>h16mN-CH^<*cubODGqvQic zZKTP=HIWS&;S?>_dg>r9dBwr? z1T?~oj%&u*rzby3vX5H6^nSm2u~y}^F{Em!OPHlO0vZ?_W#zJVr( z%b<-@W?>VOf-)=+aNQQ<1_=)8^Id`{8dg$Ay@ z4RaDilm~tVh6nADum=$1TaTEw3@p2u5{v_$}K99 zHJg(~))`bSH*S5jnlh9oKRCuvB12pfr*w|}tHcPisV(tNMGfCusO7A0-3B&wxGVg7 z^rjJ+ldUV=)&5X>iD76!OC)2`El&Pr!=_{%gL8ed<1EZ^Epq$133d{UOG}b17lzqQ z>ttS%$h&*8EKP&i@FtfJRc^N_)AABLW#&2J<)?S0t0EuQP&iHD*)c;eW$f7;qS7Y~BzwVEMoMG2v}#Px5%c{1ZgRe`z( zXD+x#I9>6rjC4@wv>DfZ-#3G0FPN&QvcGrhIl{&GX=7=W3>lbKub{fW<8ZSJ{f$`D zKpb`{gci5^rT4Q)MR}IoY1uv0zzYFj7TtTG+MQUY_cZ zWRx)8h~|ToZu^m#VPE;9&OxT7uWpCe2myuj*Wd}puewyAXa|~p!~?%AdYa*sa~*H6 z(3A+7tgPSggN{~#qVviX@TDfK4LQ=9N1T>EGM|y3J?9wd2o9Q^cb%y0 z(UFcTlU~BqWaK4oUp-0OUhnoaXN&2-2;VM}B}AjVok@Y*XO@L0ifaaAUnjKHI291% z5!seXK9J=p828Ex_B4`F=ML#k_~VHmh`rlQ1s$gUnzxH&I59aDRn2=lp37MLU0&W6 z+=Yd8(^8;!+#im2{%IuFqo?7z!g<H1;MlJbPSiDi1|{T8d~K6L?S^`QV-)U%8 zgbT~hHs~5N&3d_g{lZmxm<6{ad45>eZ3gI`Q!zCCB`>xEZ<;Y3HMK{j1 z9yZ>4u+0a^8~RSHi7+jfqx`z*t%je_weRgCUiIaul+l>pNlfB36Mp5V?*S*X8(Lo& z`~$&Ue;wzQKo`%qt-i0qj^lRwwlAk7D3~5HD{+Bw_OYnaWVz;VaAl9OyAGf1d=-qg z+$NDVLkGvJcb;<$wt4%W?!fa62xi{=u|3Dp2M7 zB+Iasj%qvDL2?m%+O579KNZ4%WnpG&u3r)-m?SV!ZfO$DGeuJt$4?Q6%9pP$%Zxwe zi=Y&r-8P~ASvvJSdwoh7B=7Z6MVxWkf)GSK#hYm36u4zQ+EViL zq!}VkhWMzKJjsjp2KOYG`AbVpzu;i^Y6wY5m6GYW0!kc?WMGE8LZ*M)d;aSkZcYM@ ztdD;N=s&*>EWD}NI#^C?;Bbbf(s{6N*UyBce!LmnFWS?>Dvg)wa8_@%bz@~5&yn(^ z6?{=TcJQa4i(0}%<(NL)QbGOg`(PyM1n|s|t6)rA3XcU3&d|9I`s>E?*zGNF zGxjQIQF}T~qFO-nGNt-fTlo9m7fXYSTNahG{{wXni}ek!LuE|y_H`rjds;BJZxZ`0 z>30V0<>iu_H2i>l@XE7qsGN73$0;`_xHL(>``P)Q)BG7w*!sF?QKMc8F?03$onyHP zGO0UuRz7zkvBGGOV(Dh8h<+nY%01v24vimTK}75OrSQN0r(2Fw(Sc3Wu8Cp zv>yvMa~uwZ#=@zw|Aol;X;RC?1h$$%qlbR%dcJJb`Go2CM2H-2@yUq%+sHIn4<0_O zZ-3+Z^}#14F>>$`$yP#l_dQ>#ckYZUqe?;z&DJw!I6KmN{_sxCc;8q=$C_~o^Z)j^ z8)VNtxM>f=dA*slR}jH>%qY6PSm=q@%P@E2nmVR5TjcNeY5Vs7W8Qsa&tG!O-;kvXp3bF$*GJ7)joqX;9K*dU3Lg9HPE|tmMz52mhg4 zD(Y6>S!T%g669{`78X60qCr-*gqZV{$cOGExC4+IJ8oHL`_f`a^JmAiE+8?>q&jVk z!1i_dY2I8^qXK66kEJeL>V5Ypy4vZ-5*yw+Nui#-ATg#LWD{OdS|eldKM`CrVy>^+ zZO0%Y&f(-dnltXOX93_$jyK9@&neaLPsQm)HXXTq+u-cIzJQQyM9=QeGp50E3J|gd zLxF08K$i~yl!2Z4<|rtg*#?zYR8F7fNgE46oCNP*y?n2)H9C7s`gi*m<&d&+aRoxxwe>B@Esb^Hc2ps)MFsKISb)*@ z2FN%65bFmMv%;GavG(KY!M;MB)D}jiIh0z0bSFDi168oQhkNbFrG+d;t814g(NEZo zx8D%R_ks6dA~pL%g;3M(5vnv+_zpq&5yL zT5n%pQon{x1iA3~8J+NJSyj~#8gWNu_hg^_@VYV<(MYo2rq-=+XD0pH8M7)j=5zz4 z4Hl)c&05^Ut?wqp6PDe4WTU}*G6&Nl&vn=$e6MOsF)=ANpeX&1@b~-U?jO6_aeWOT zzs1JKw?*IJ`i)GUA$Ohl)Wh~gHJkq{YAMpW8W721OqK)49Nuz9$>3gO_YALRpnKVm zy}o;+G`En&w>>TO@W}RH;QLNW<`6(pPrHtrlhYqNU6iJZe(<|{=`ynC1$F}8dPZUM z+m)g#2)=1%m#Yd7%G)jr#jjp~i>Qn>_ZznAJV}bR`YiYDFB2qUFau22B}d0Gfn6Xl zqXh^Wx^3dT;tbxe{Ez3oaIRWdxHG-eZro>9yuq*#E}>F}Q`jK3xp!T0N;8`FtuLDr z4by}%){^0Uu19~m^~ybA7VnhF9G+e25z9S5@LgaL4j>8lK^huPr0Ls@@Je0*073~$ zB14(wh_(@sYy6-cppL-Y5!DM|pm>y(IR{{TA|tt>$m9b&89De6qV@#HGqs_KaE02s zI)3An+Xadt{Ko7h_KUs%TDlZ-@nx8T$lFrOF~vr~HE8mjJ$rUIzwW^)AVD>1b+*lX z{lLV=HulC4LF#-L*AX-;zui}4V@gayA~s(98KmkmJ`@|hHr5Aney%ZxXD}su(Zywe z*UKz)f||^w+qc7~)80#U>RO^|2_Dqw{+ zLAjs!zyX`Edu=a|3W!h7r&RNBva+6%khpvIZc~fyt6pq*;Kzet;J4vrV9WXh8XsrMdYU*HnK5soU#DmJ&sAUoM7Di{kCux2KD3dhMjSiYCuEL|P2NuFY*=F*Wn` z5~dpG%*2P^Ovn+e)!HwNer>;D;skImM9OEVa3a+Yz%yNp^8j>Gg?5Ns!8~s@u51%m zUwrD?v}DIqj(d&q@mIpx^Lng9WyWt=(~mge@7m`L{az8wnZ-`g4~TN$#zKNbuyqT= zi4*5HW}Znr|MrAfY-Z-efv);Rp(YU;+fsYOb0rCIb*K7qd@sd52AUh|}wN_wY-Yeyx? zrt91+v62+*Aa4}Jk(z&w*KP9BxZ!c})2ACtRmplo+R=maz$u!-)lty)pz4H1UJd4+ zpp=l1;K|g`YP&Ec!xCdS3LZ{vAq!l%#u?U&!imslHPIX+62D>wELX$Sg zUu|OVF3;$g;@J?+;n5jwc+I?;DuZ($N%UdHaBy^f z0`P_w%i`jbowTphc<2N#qLB{98QYou2W|!2~DiCw0VY;fU z%pifnCKfNP$5<|J`)pD{2_Tpk&Dg~mQ&E~ zv42U8Ho$GpM{d3{4COUAuIaQGpg$mb^5hrBHN6D{6;ImL2T9lLaFYe)Oo7 zcav?07UBP2$0j7yK#N|=Ys`P0<7ToqAX{$~XLsjD00an#pL+2qbmLfvCGbV!je3ha zF30pi%fsReqK^VvJr)HM947k3Yl>%~!6sF2QxS{ujOLcnA6NtH=27ZNilZJ~Sn*WZ zxiCZ^rhulXft6%rV316OVe^obm0pf&MjOtlOMeBT*w`?sQ^uY5kBKR`x$|%c9Su>I zSn5Gxr!s*U-MG{ukBySTM#TpNbbg9fn(hM;%XkCaZII2_J2?{X)EG%6Z3o0g`Al^Q zwf@_;ar+kI{KCR$=7aIV-8J=gH*_4LNv&Is_jE}j+(mNqMqoSr(0IJT0l1RhZRpV$Xyd+4k4p5kMnAf}?X@`HuSypYVn|NuRPuD6SdMn9 zf9hL~6?rl|r`(pJ&P7IFgw7U0y(epWtu&Kf zl*OXiIw`+3PoG)fq9zrj&6}qf9$ibZp;eIpqPyQ^8*ypu(4~zD)KsYFD^tLRI)!Xd zT!CIotp}cG_*7S(z(`zdte~}9>801pP5e3fBElA~7LAOhb8YlF5QZRl@prirlYm^zU*Pf+Qr*a%!lpm_0~X_L3ZZ)!-#NC5tsxiO{B-FYG{c^&r+EfCV7Z;VGy<6)f4#$zlUQOZv zf*ugFyxusN5h(evk|KoN=(D@nWt78hYx6YlwQAjIwQ?(^BV;& z`7GR_QhK5d7i`a-btTxn`)2v<{AWjOWBYlC|THJeAQ3tfR?$D6r~X%7I+! z_xqM#@YJPGc)Ayo963|LxU?umFy&S1-_L3_yYIvM_ZKuxW^-dpKfK`E$}Lh4=C902 z$kg}Z>&(9I$h2+b>`Q+zcaC@oXt!zk370C$o9;RcbJ1Fwv5rN}! zA{x+gwi4oFxsndz~3u`c2d+@hllul{Zo2bOuHcV>|KfV|1jX z8F{$V>r6=Xq#Gk?M--{*uSz8JuP29Lq<_BU(32<|)1q#<`a$Z}XHxkY?l7jLz)j|l zQc}LTdI>6?Ve%x1CzEicOz8@~fQm$&XRKa(@m$1z&e_-e+4JW=`S0Bq%TJ5x?4CHF zOe-wVeZABK{~W`QsJowZXL$5F5}P2Gu5V93_8!n#BcV%g!Ni%Xt+C~!0`G5YtMfhOQ? z=Ha?oU%(M$~}nZOuj-j?E8L0X)4n9D=Jb_q@P0)Kh7~@}GeND@(&$`Q43i_o}Pw?hrUj zk>^71GRU3z zAsWn{LEc=M@!D48v?ZDW1NqFnyxH9X1ANsOR!<9>EJQzZ#-+s+fP6r0Nbe^m z^w^7?Yi_QzBG#{#qGMv>Y%smiO1;mXJqvY06#oRhI&G?7au~$sW{m3Xz9P1Gy^)Gz z!EY}B{`^q5HMb=L2$7fM_5w=LUoX-r*O$n1axPZSmht|5B7ds(T)rD~$(WB%pFWYB zr<4}2xwgTXi^NOyWY68?52_BmgQC!g|fd(LHc)G;bKwiwf#oY*tmd*JJf~= zDXp)sdmaG>e9GkkO-+IZwr>j87rG`88P9|q(8cJM!w2XF!tw~wI&$QQVsm11c$tpG zy$YgLh{GJC)2Y|<*TNyDfuIh^$pHXIkW*GxE@+2TOlic;=~hKFw2TwexSv?UJuQj` zmUF;FjZTm{vx3;7|8;()hb-bB9)xS*yY9h@bUY;k0m890nVea&h-5Q=`QKro0)^mG zWT04%bj8ne@Z%^$tU`Az>cj`DiqaS)I2gcSFzurEQ(~rRn};VZS;$w>ry&p=@op%I z6r~10g4@w0{I)1wO7pN8`N$ZK7Tax}64MD$#+`GJ3e69>?xquDPt?>jFFGEGz}X%r zDa}prM2b1&76S?j0W*CD)`>}cC%Rr7I9A0?CUyfn$=N3+|0s(5XG^ia7Y}3tN{;BH zTpMSd0K_z%KtG+B{i|=n6q}IG*>qcI+K(||S4JCSrV4L<*Wy6LY^_V~=>SG;4g#ec z-NvRiAn%e2VvH(p*W7ksqj$aGR8B81HH2_r%KoD}DzY@ygX5lg_91d|?f0~zw&~rr z-(E)ZoV#uIju50FioRKYAdCEJE>dy!_Yy(?2n)Gd7wnWJIsWYVp)z-M&!m!9rBavWk5U%>ta7Ud2+ zEq&FmzlnGgNF%Nz6|%9hvEi1KxIYu6(LwZM9%&JgiNwBGBV-M#@v+`2)&*^CnCe_rgvljpj)_I$2sTt zT_n_uA^BjPh>$EowPIss6@051v9!-?e8-RXo^4fo%O{Y&J?=#Oqu*P-^;3SC3yP)3 zhsNeY7pRR$=X9?@j*_iOP*q*+Q#4&D_Y|b`h1b*)x+6hEUq(?{O3Lkumvi!kHAEy8 zGDu!y!#Nz{Ps2k>fy^`qxw?mEa@k0P!ysTMIc+xGk3!%avtK?3A`fd!Gl*b-0XWiy}Y;uvRq2dqOzz*YxaleJA>j+99Y1BsK_~W z!cka?q5a_~O^?XPNJeI6S#51?Aw4IVufS9YDqA1o9v*o;Ah#EmU}D^^foppKI6n=V zRm)EN7m&9E{b50sd-U(MUgLd-ijIzXySbR=9Q-&rBB{CqB;#+|2Q$wAw?Wzt^r5B5 zGDB*uxS*C42-T3j%W;^)0T{>bYv{uuPU>((z$DC$tvo|h^HjryS(vb<9q0^K94af- z?|)U=XP&C%1jpOMa{=AB7E)#7KQM4lsnt*fB+q<@0514|CKuhAiVBqU$O?Blr^3X^ zd6|Y=lm;V8`)-;wAUuJ18gOR{s@eKq1;02xU=S8o&E^LFAs@HQ=-HP^nJAIzYbsdl zn42G0^S@D#e9L(`1hO6c5(n1aSAZ5fyMcj0Jhw2}Bd$&=r;Zp16o+&B9Oqw-R&BV) z<<>V8nASO^+S0>qx~)6l8bZXI8LuL8+i!2dKO_Sc4!&Ah5z93BvS+NJpa|OBG73Zm zpH8Q$72ReQw@xcStv_aoJ)c0wXHr8!HJ&$SrJe0bkaoYZEWYZ?7Yg}1w=TESCz0S# zg(zt;4h}h_AW>aiT`9bnj|GkaC&(!IZs4$?i9O*8y+t-@GH*nD*Xt_TlMElIXf<8i zT=P8#B|?SKD0q9&OcWy{V|z9BbEi1quX{{%DrvFb8HwTOCC?iRt42MAH~7KrcU$J- z>Dh!II!iWOCu1nB7Q%}T-ftQI&^_y81AtJxqz`qYu{?&53co&2! z3R$yf#~S4C#`Gd^$R~v<2#V$Ve?bH!=4AGpPMpB~0*yFci&~c+b-d;x{*RaqrfwTs z4%~&QeAB`)X1n2FJw`sh8}1$+Ur;#za^%%0pE=U};jZqTZ0?|ifJ94EnsBEnnSO8d z%f;1Tx@g1SJyDlsBDgLh^kfwNzAnJ>BS{Z~G~;^TCN6Sp{@tV9L#aj6zQB=_y?WLA z@zbXvracEh5|deh3m0Jzyq6&$FYs16?s2c%-kfo%1gy&Piu9%3|MCre6t;st(%Q&N zTdXIQ-MqX64gwn^aDClT3GD3v`JRDD8>HJ#_^&cnrW{hrujP;{C=~` z-Dyw1t9$Lm!-sesppG<3gpwbSzAzJRj=Hpc80GWGMXPW3_%wPgIei{6q}1{y*mq$N&h7pavO5J~$$S zipUu)d~fZVqh6?4&nAKvPJ8p_s_cGR-}6k*lC9kQl_$=(<)#8zM!>2^&$z47^Y*t9 z49f;;V_?MeWZ_a)F0IwXtLdz~6fOH+pLraVD6gJ7caEuD_1w7>CdSFC+zzUn_LnC- zwT6qYTBJ~k$rk>Jj%aZMY@zVoxNH9$Xw>!o^nlCtpFev~CNDHGWo1Xib?GbubdH}j z$nIFVJ#o3cTvYEjB9gs+?fDlBWmwfKS!Bc1xqGO{$!}xK&Cmbw$ntOiL=-hQGyHhj zZ1-gZi@+QWSOfu&7iPaKZkhB_SC&#!iWK_Dn8Rn%m5{1>@36^Q)!j$QX&B$Vd)M}6 z`5AD&rdmvKpt zKTEN9>oA>a*1FjJ-sCDJFOqnLm zCtq+fyQDJEo7ns2TFm{P58+o3Id++uAZDDLA5OI%O|>(*!nyaIsE|I>@)zGw!#1fW zLwftGddf5{J93$Fno>VjeC(3hi2tG{7i8kZC={m>HrI=;!jmOc@&)%n3ELvIsRoC{c`EG#%rXsw8l)+d$ zAsON}-o{ST3|+DddNMb417j27Lgkv0gUKy}PcIr~_1KhO9pkz4Y9WVm4_+_lDcwmy z(`h=VUvO&2n}9c}7q=Jw*?;}*GAIa3SMO!Y44d{9R*T0fjR+XG)rO3Vb*L_MO;_u7 zI${afNth2p?os$WJ6EeAakF`8(nqY#HC0 zTQ;j-9u582_?CGs&$x#fNpOtSa@|YIGw!b*FIaIuTFZ;`oLBzyhMs1JZ&Z)mXa3J` zEFmJNp!j^0TY}jkC9zvA*Mx~y*xK8Asta9HQXqpP@~_MFHZsZ8Y1UOKxgTKX_HB`h zE})1$jMYtsZIO(~=2vv^-3#1jmAqH>)a>?F{O-a{l&n_t-jYZBn9Kl~?B?+nsx^7~ zA0D_ok6?IX%}-|BFzs2mW9Eg6QfrhhUW+$1dmDyo>cxZ9MQo}SBpiko$(OUHN`1%xBWD{Tsjr-0G1lhu+DyQ@cIW8#4Fn zuEmcEGPNd{v#w8`$9nc?m-qYV67S!iXq$gFS7uUxNJQW_BI4(*V?+LGskuHAq}~mM zH$0m%Z^#kv-D^>K$}Mph(tOJ_uDQuWvZ-q*jQSZU@fMe#e{-#A$(s{Q|JjSPXpL)~hHU~y-fm>~xrA?el zrML^b?mu*HxFmDM;O2xwTRgayoW^ge|2VNT!mi<6HqW_GsF#4BU~Ve5<1y&XjDsA{ zUR|@K-=(z9@-Arh7FK&ttgWzb{#*I$Frr zC%0=&eUUzRZYtr-K(@u;K#l2}`m3LNJStE^`}u#}Mn8uS{eS!A``lw=4ooM-=wim#s^BO9B_09nc*?Tm~j$gwqbI2>b~c`@%5w;=gMks4Yp{2zD8xI%x8LT>;CL5#vS?Xbrmh!XQJZP?KoCu!Sdn$;N1=^Z?T7vPy)t~)Gt4tc z!8;xD@gb$7n|7qOq9Pg`8LCEJ0EFpv0-zgHhf0TUxOyQCyZH4n+JzF2SFd2_-LoQ$ ziyuGT6*><{+s_}bY^~n-b&ydnnH%Qtss_Jb>tw%hgjFxnrXbs>=k*oLcjOE}VMFrt zPZWmf;37i6_pYv&nbth*gSX+4nPmE6^FohYyx(LZpLPsTra}mc`scp<_$iHoh?qDt zzF&dA254?67apWecG*Z~W-hC_4dQ%*Y_u6|uULBqXxC+M@(0W}_^coDImBE|f9UlD zk=u*soIQHECzu5Dd8=jo(mnT}JC zfMRs?2}WUdC?TzKoYFfnzO}}@kIM^lYIJ}o(V15|lR2TYphDG!&@XoEw4or{vnQ}o zPm{BnO@8Rx0cIK9iy_6h&Su?(jaM1p#!nh;Tvsw}>Hhvg=RamAdQFmS;q>W4x}X4+ zF?R0SwT!{x;hJ}kR@;OX9;D2b?P$ z)7F}{LgOX1k#V7df&ylPw2|vMqyB4mzaAr{qgE{3pEdsVHK}gOV%)x;ek4Dr9Q-U7 z+&Y1@Ej4C0;Xqj zULxbeGzs7AQBmc_2@ z2}Th1yZY}ey3ty;l zckI?pebol<<438vxvT>rM;||t_HIcNxvvWHUiOrS&#YVW$aU>DEo0q8Sw!<1H_EDJ z8{JB{2EZmI4eG6|v^e)OV{gwO(kbzOBlXmVT)p6P^5ss^%7E5dH z?W%U3VA?^8GriaqdTqJQT)hkHnPFVVM(dM?4r$!*ytdiKySyPB-ItMKwRuqoYw6r^ zdSpJ?z96rzsC(Xu5bRM8Ys@(@Y>=4{lw{>Ul(P!S`=>w9m5*ij-PhR z&|inAiJD7IR?lg9Y}+{G?C&3cvRml(J;cY{XEYnKa^|Z>0EKgn*tBY^Z_~-wL~ay3 zzm<>_UGYzq=&gL(WjE32vN}+uj7l6wTJZ%DR5Fcx%{HAI%doT_sAxK_%UT_lZccsg!YIf4Sm6el3?K9Ij`h zO^NjHzOY6DoU9m$U6x08Mw01w#})``=&D{_a%gBJVq8)`DeLfie*gT*<-i_@?r+f$ zrNU+?rPLys?=Jyx!_U;JEy{NT) zAiv5mNMKLGd}Hd8KH8*>2ZB836{Ushvs9G)Cm%hI~Vu^Cs2%Fn-)d8YZx$!}!T=#i8H zDrElrc(cUS)|?$Gab;vvv^mM(3YZvz{_C0FrvcD)zi>7PFrOYfHhbw_4O)Fv0>&38 zc|kk-!o|v0S0cGgY)x(}zIfrP- z^*b*;_XS*nolXS?a^v>LGMw>P_y5aM^)jX# z-FQn*NkQMr)Dsj?qN)2u-5`)2rOIm4YwuR^=>nbAbVg?tH!HdNnKR3uJFV1)=F{M9 z@pXbSpFD+boO}4l^S-prWKR&sQAHaXt4UBC@C}U63cO*+@A1iEKC&*r=Yu`171z&R zP&t+W_VseY;S`KheIVU27s*8eQa@SEz`8owwB9Eyf}w}7EIS5%sP1kz#eOclO~-y= z0P3FZOZ!sK?VmG|_nBogl#P&3Dziy8UShLzWNyX@O(W1Po&4?Y-FG*SK4a>U9X0Lo zVeZO}tDd_NIw)xp;kbyq9h|5CvfBZx)0AuF_br$2zBawka_zg}u;}RX!-TGVfhj?W z&CWdBCyLk7(&C&#|1fJHpvq<&M#)Cgp(4>mD4=V@SN%+ z>Y&@@{fhzZb_7r;6qfQjF1N$pa*(-mX1c=V_3h13xg;LNsLC0bA;<(4a}pI6C%R=O zSALG6{k64o8$_4L3_n2x^ndLiL;Brwuh|gR75#@J7+x|7zyI6&c7zH8x%lzvQ9pI3tX5|p@PYS2JD{YWtLjfTAlyeY{T%$QOxrmv}I}mIxy{%)g)2MlKo$ni7 zK1TS-==sMjWVj+dZZ#4RDf~akI`8X~Q&|_0O316G7!=9Tuv6$trw=JNz#JMDR|vZT zhf{|twP04<q^(`5^GGX=LPB|6b8T;Z z`>|433EOx*-8XoP7p_r4m;BBHoA(+RG3RmP#qf5}KaV@N5a7D&uN{Q{^TvN(UK;7$ zyDyHT1*X|f?F6i$c^SnWwoH_i!{Dl`7$}``CGTtRgf4ChkP{`svIlA)SDEFdbCt78 z6D3(o7iEF!b#H9`{wScDXN6W}U;_QtHv8=RzEn(dt&!s$?a*>z?TSZ}nyJ9|9T^FVUGZ%sJ0CKvnNG`&r?A+NUE3ho&b@+gM+6yx(!oX8b)iKsI?LYBguqH|9l2QS3 z?%KhUG64#cBMd|3YE0E9^OZelXM}V<6l}t0gaDWPq_d!;_5K+z&9NG8N<>Q41V9dm zgtLQMS|z(~Tzj|c@6Ps{M~#WFlbZio5BRJ0yB5VQSMao>wAHD=>`Ik^ky((jsUz1S zBwQito;CO{m&^1>z|Yk$hRzyx_H{*(Sa=&1 zHqcX06%SigK>C;r73tlxi4ZiZJjWBqx7kM?*n=~XamGmPS+(a5h@$)5I&V|6fni87>X}rVp zawe$+sRL-VnshoFUAiiNYNh#MF#VrP!%ss$4(~~i*#6O-12|33{`ZputJJant{FCw zi@FX3;CW>~#fMvmMMq)V;KI&O0#u)9Q&W5qT{Y!QEuP6AtO)IY zPjvbE%AgJFnLq)I$k8jm+Mmuj9k@1;RrGmdKx4vf^Eu5??Klifw)>i+J8zHZIS&_pEV)IsNt%QI)UMWYJs;!3;<_R zV!rNGa@?xX(|8D7Dszr5`qhYy0OEf&Rm@TPeUm=q4)L+s4oWBN>V*ncPOz+O3XvCG zZdk)3fa(hC6-^Ek*?z0ag{rxOBJ<9x8>qd@z6}>d1M|)Q>A7=5L{n*xNA=W%eRlgo zOTpkgo*u}I!uSs;V~6i7B~NmD80!1`%Ah;1PbfH_xtzQ0FSw=4ou?U;TFg8y(|2se zbX+GQbXSL>{U=Gey_TP_(4?HW_h&P+?Koho6ReAP4VuOMnK0d^1GjMV+VgRa__wYi zrV~q@v;vv+>-v;hcx+59ASVCYgoxrJuTz(Z z*mNcA?=A9dP8vaaSvW3+%eHL59nE-i*&xx-`x;ncfz>$TZg-mqiHWtzd{ zTV!v5VfoZI+>Wvc-&d9kNuUj1plOwFxU)Vh6@zQK>BRWyAA_lAw(ucUh7_yRBrNQ9 zEJ1IKndoeUCnz6%xHA5NwQT5h=@#lw-c91^D$yvRIU z6>54x)+0oI64|H;2GYa;E;#jA6c1JdhA+PMQc{up^1q+}LP;qenLO-fVevzx>puI= z%F0mVz5`ePe;zH-3_oQ3e&X+Yf{%9v6@jJQx?9{!sxQI64x~^%e!+ zN#>``C+VJxSmU60G4tX7%$tS$p0i8__tkv(&wdLMud52H!@Z9(J5lv~?>no$_0@^8 zHe3W+>GysFAJC0to(e!;%UPW?sL9Kx?(WUu`z;4MdQ>X%jTCF$j}Cvei~mOy(*)qr zJTLUWhBw`PMGIs$8!yk*Vrm+k&o_@pk~64}gyuftQ6kyW4t!K#tG1m=rOlhVge{e? zkU5m0q5s{{&<~n$Zdcr3VP&k?_=(MGq1`1Q80$qwMxJo_kF)&b1gTtp6M8i6RchRK z8}HdSf`X+8bmbABmMPGZUZ-k27p1lc2A_C*^uSo4_vv=G;kYvYU(?8<2DhlGxhs`Bh**^7Ase&Dx}dW9(fIziFkA z%hk8M=_d$(B6$SA^Sik!CL#h%5?AKoBcr$^xQwGyyF4MmVR`*ka1_A(W79LOX=($n zL+<+I{a(7#7kh$iTMX0UO`OQ^hTa;p=bL+Bz9I10cX-H2&CbwD58#PRc%D!lUDXGTgDmO8QL=RPmsSlrw#Kk4v`MOCJ1GCX3t zBpCP``h(2i1c_pzfHt$pRSp1MSdAei{ChP~9}H^R1e#-_5~JC}sXK^8gdMW8Hs>7{ zK1I-B=RdRMO_(Tosbk8Zqu{v`lj7&2bsp{WL$dY2X-Ytjvuoy;y+{je-Vno?R*^O{ zC=VSjZet7lWm%3GvD2eoY$v4l248vxtUO0M^%eD;VExv;YVl#-ZKb{5#yT(2Lvi${ ze(^u6v4ew_YpeBKH|yvQIB4A@A(iVDU(SH3Yzjd>Jn2p`%8>^iE3@p?A+h^@2D_1; z`l^i)oX1>}Xn*E%a<%-dXpUVzLsXu=L$^O= zSQ)m?m;%|hx3Va-AE*jyxZ2KAp2LCw*V5(f0aN64RZ+atZyz8u3Iu&IpbH|tHm+~?0tuMN-g0wuU-bEmFR!0nL#)RWs8D-6i->Ss zbE~u~?554)cN%d+B0}QQd{Da?vijg7AuwD#AE}HlD4;q`BNv`5JF%TwYJbH>ef;TE za@)f`^Wx-kw4nbEgzjUqYZ9P^R{dd;1!|R=pOy9=`%O&=j+fEtIX?<~Vf_Z&RrFd% zXHE)Df}u;+DNJjls9g~63yR9d4xY?lq9;8XSGKQ{Y9qzM+{`cEFY0qk{Y;tld?ZUL zsNh-0?%+^`M8bJktQUnZ0BwMb4Y$O=t`ln^Y%v>*IZu7E~-6aSa`CP$S{Z)&kp2Fi-l6o6aQRMww#R<}e z`rfj~x}9F_>M&hUgd)w(G{Cxr^+?Ti1G*EPQh0txOjA1P%qLUJd23vG!ev$WC=_Ii zgc!VZM^uy&1qq6g7u9{Om6?3MeRctIUdose7^NgV9l$Xk`{{LnuedF8^x|x9QaPXD z`%ur_anOCKnp}aiYwHJ!bUK~himOnX#5I`@mc!SDgN;q2@XU^zaCIUpebw=dyE2&e zEP*+Rs_glNv0=v9)u%|aFH>upFR%5>R`c%x^OURScSRQ-wbOVtb^(1NGoA)Hrg8C- zV6!rat*=Rec9zpt7okIZ26+M7Q)kbVHGzzq|4Tp{A*fDC*-S0itefna3rf2JN>2@B zkOD?>VK-3s7k#t`GZK?2?2*xM1l#b zHXD`-wgo-nN_GQ;^}T~*O8#tP{k!J*hTj{h{+7B>Nuw+1EtD&ZAF8p*@x+Wt2D``( zW0l8~)BN&<&$VZWoy33_xS5isc*PB~eF3)akzV4>M>lUjV4%v^5u5nD^I|6pEKDJe zJ@pyt9by_z$dxofPok}1?%Cm29>vQaOjp#T{73e^y%r!lWyBv)HeSuWJS)pdRH>jo zW_rfI-3LO)K|%x0dLbF$$pF!BSG+hwam)z{%1BL&Pta?H2upk` zAO4R9?S;?PJ-Riw-b;O|z$NI*xf_A58E}Ii78E};%R==y6-xeO7HKKwT;=IdtvTv= zIa5}{*Ez^P>s6I1Dkm3mWF9Ypd+*+=YY9%2H41!Jl{otWg$r9BePYA{xUZzc9%#7| zWGYFIOSJX*>c$u1i|6hDxKz3@@(oM0XWY-&tzQj_Ic2ytep%)uJHLDiKSl+<4ZK;Q zw5QM#+zbBKMn=URYl*16JmEoD=Cs8%K1iRpH;``R_iJ04Kqo;DW}SWf;hMjNp^zE#_6o(3(&{PQYw?2Y#p2bDy)NlUt=r-iV6IhgUB%hs+@F;+l$VK}QLrVsd3gyyo-id>v~!Su6eN z=e*avpdcjyjkFjf47_;jvBIlMlE(yg7vxRc@Xj-uBdYvF)qe>$PHURLkUj$#9cYx> zblVtF)XeAQxKUd)>27-c>CM=vZP?D4h0LBib3sgr0roX_V${mrqx0^N$>mWBc` z6cC=>gU;eD)9!_&@fm!KVD{YjPNzur(qCe6!of{;2O_clAVL5{D}0DP(b9y9x9cv? zzp1Kg)016nNOj6i+I*;Cb>kogj-CTCRY*dH#qQK6?#Sbl^83)EOo*(-+wO^zqrl8e z6Y4x`yR{cAea}w%Y)Z{fumD_B86P&}@;uNiam$&3PaTq79dSPT{^Bikg&?wUgB-e& zfN)XpaoJ@NN@e{4!2o7iKSkNjXXyD@rd#j2jKF{_KBn~O=>|=}>vl!~NXLDe#gvZG z3VoGK(;w82gvCr2aB2EH{QOI&90!03AK+&$Deu)@I*d&n~`HC~~X4hmMA5hrc>7?v&Hzdexo2I>uXlc8vP| z*#_3kdG0D9d=xAz!2jh-MaOGElXCXu{K`$q^@BcaML>`e@=~%155@WUZN*JHB?G#h z#>vSQA;5< zYq>>gGU;q`or+eI)T2~SSP)l4Mx~^J^uFy>(tR{y5d)|;qB73F$6;lGEx^|3^ugm} zsmavUNEjbn!X8T1DhIfM!6224bOZ?ld=_^Szu%k~q!ely@*$VcLRS)J+}38akWPjD z-L9wyn2@LRGm^QjjPPKSmA0k02t>Ge$#cs%cn<)BQFc?PvIsdpN@F~T1UdnIC9aQL zFff3aYy$=6(CVFe!o_juj7`k>c{Qox_AI2|6qAeu2i`D6hbg4-=zU?F5#(GGl6?JX zm3{@IO@Ado=qQDC4b9m=w|vdLIm1Qw8Yy;>P4Sp;F^=});GnzW=}{G_je+N86!+V&Mu%)+E>D6+TxwjPWv(;As%%1y zFDCifGMJx|y`Whq4S_{PJw(T1QT(ydU~HYd14-L0sqIBSH@Q49@-h1{sO*&@0(kjD z!bKz4>^tKYsQW!bIJg9VoYK{tserToIc|7#?d0oMi77#==AR{d+J|6}43KZ1v$Ks{ zk0>ZB^Ms~x0b&{tFq2b|u;RZ69}-5#qm>18)e#;-pm)DX09onn7#kyVlu&I&^33{O z2t1h2pg`v555GTRryVn97o5{1gagLRDh+*_mDV@h?03#!PH8y2DwKde6ZA0dw_xma z`@3Dr4IOX3fj7(MkToz68aepiVHnUr23=kIpX{RHY@cw{;f1)((KB}>B&1!5ICW>YDK73?+kWhybSl!l#br16A)bZnVhKO?oc|J{eNXN( z5XvD5Y`Y6hF%;?|+~Z9BTHHD?2d)_cD1WGaij)!aY{)T;>H+OO;>-Ps%Rl#G8k7CkUYQSb(7nVVv(%O2zz%?* z7NQt>9*e;U7HoY)#lJSgv8Ca1(dul-<5!>$#oA#4v~;-LbzglKWCm!&kKvT;I-f0~ zFTi-&K2T`sn)~A49Z-ONMQm560tmE69AO(yra%a_fsy04NM|YC}AB+6CW%cmw*I%hRXg&3#1wcCL`VBc?VGl zzy!)5nmGZ?g$&AJyf|P`UZb0^6FLi>o_Z+Ap}Ot08gO3A=O&Ker;9Ol)WnHPY;z=S^yQ>DuID$Qvt}~Aw}>U{-Z6iIq12)IPo9K zhCHz^zF7Ow)lgyJ;<#j^LoM7mYC3ECwETorF znhmj_m-Fd|(okbFmLzSmeT6fSuwD+QxgM6Dkb*3q$GO(dNUQ( z{qFbCwmbC+)Y#%X?@ngKo=A#HG3={I^542Z6hl@KMG-@hY(kR7VL%$n=CK^|!hDzT zL}KSM`6Q6YoG00gXZ(dL&yGjh)9tD1BOcBzE8UTD;h z-#&p4!U7o#PTF#_PFz0c)u_7+sx2L5O2&vyK(+FztA5%c7MpnHZbkhUDBcU|njS(~Y^bqJTtlnlvMP8ZJe2_>G8 zdfaYeyAx62OQeD!E+O%q1Z*|isTnZK8%8E#I8tQ9MnORl!)7GR07MA+!Nk)0%PTqC zH|0EjHaG;#+8Gmd&TBDKRSpu4iyag|$wNa1@TH>+;y;ASY%cdEmjcoYd0fQ&`KIlV^S8WER^}Ru-fMKjtE4SB^Soo-NVEf{KUrlTG?quU(Of#1s)oooBT+jZno`6u7S~b>t_}J1T_!3CX{C6%MsGU0mv_2L1IiXR zKbj(mv6*j+@;u-Rl18GQV@4X4C<4T`oQtzZOd7EtRdb>rk%~M&^)W|``09%7u$6S| zeaUgE@ZRdFitZ14-e^}+!{Ix_l5yG5BlC*5)_70~=Psf$rpfFx?(&I}OSUow2>+j6R?d=Zl{?!YBRe(hD$(83U9+Tu8r* z$V5Jf3&bS8-!H}%DRqzcVUDuMrFz5pI@4vmgn963FVMEg$RH2()J{Q!fGOgHuJ&fE zp8ocrK@}F!)_JKn?@rSNA^Cv`f>PSrJ-j4LwX}%1!7Ipz*FkB5kh}eI_UF{1Nc?_K zE)}v_4eXbV(y2E+AN7Ox9y~@6={`+t>wg4kvjqQNBGbesLA%B|=jCw}?xsN(bi5N| zck?(ZT?Wb^P+1sRA2H5>9wnNB%j@Z$e!+ewbGBc`8klQTEQ@~T-NF0%+~M3TW%MjQ z{1eJczP{}jcaNse*6d$^E2qh3K(DU|z6NU5u)t=ouW}@9tyRdSoTju7Cm;i8o9zRkKAqt65tlh4qe6W?c9{potXW21yGbX> z#&D00IF`X&esUb$#Ty2-p7BTD;G^xY@tFegj(P)1c;}Pt8dOy7V<$G@zyM{O25cl# zn7}q$F19W6BT&`4Rd-T??YsN{IrDJLE%K@3UGQc9{V|p!=e1~jp<5~btobXC!bnOy zin|opr_so}Wnt-S8VV~P(;eKmPzji~qn+k6R^Tfop+-&;#rdzz-I1hFwpz% zw>!1OI1*Jl5j`kUmh(7hff^CFSZr?oE2`C(J}jF-tXDqM!$>+?C)>3<<)dZpw1^4V z4@1uz&>D>xpTy9FCguoVNoPm<3Ly`{%&@N?>ufS~aZt~xAj`j@f66{npjUaT<+LS) z<$V*-UCDDq$EA(}RznWV9lse!<}_E5OXB*t|077t9Z26|MnYa41rd6W78-;LW1B$w zQutm*?G+>5xOp#=w*mn$I0UH>gqTfGLfo&Ddv#3ND=kwYN8h#LP6YRzmb%^xPBUrb zWw+Ho{d%)J9I*o5_bI@VXUDeh`cVAh0+=ADtaabZd>!HYD~jBySWu& zqLTh#{{5p3){|{OJqK7F1T<(fS#Yu28db8S^YUk00bK-qLjI+PaS8|~VsNOWb8YaV zTwAa`$wC>N;X+?w#5_tqSl&K9-dFI^J-&72D7Xj%saV9??sy=5cG$`HW7N9pC;FXM|ds4o< zp;{sCGTz{lx;a*A7=c0>g|X7#A#O8kgLVOALK2+UE~)Dyi(F69D?0a&y6J@{fbt__ zWV#5QDAJ7R+}bxf#|6*aso@A>9>cUEBek`%oJea7x59hhdeWGR)CUOq<5;ycU^>S< z`-$X;TG++dM?T-=KgM7m#kS;b%u%75Ek-Mw4&P*IXG6NtcqYYR-iy;n+dJ+l`UQNe!x*Tk+ytiBk8sgK?ntb9xR06o`Pl+JC9TY3K+b5= zgExPUb4jkjI0;<=>a4OonGo<))bBx=Uk_knpxQgH4M_p^Ss|TbW51fCi!-OS(V}$n zTQ6DsJ0egJYM$x?QX(}B)M;Y z_2~Qnh5Zi9mAleXlQnK| zb0a>KX!Bj=X-fi++g1ia*+HZfdBSSE^gUdR3tzzJ z{Ilp==?;i_;-@wwKhk%nHab90@{P>JXUlKHHtxMt6JkL2=_|g7=V9)b8$1+s@TW81 zwML{Zi6BNoc^yG2pnePpO$(6E0FTgm>+l(v1mqUYcE4rMBF_^C z5~gvyMJqC{ZBE04d>Ky$=gW}GS(+cu* zg0If-iGNN50Yt=d=&i@PdEX{oBBz$)telgXt&H(nDT^%+EmkmciX6hs$n&R0cVfw= zg``JHnoW<(oDbCk)1 zi01iJG9}@BsS;x{V!@ju%~$ZTdZ4{2EugI){m4dlVS?We4H51}U+3G;H6dg7qxS~f9(ngFOsle_G#_V4DZg(eMFh*%o+cFgYuO}YeH+*;qJKQA zVl(6-D!q?hCE)iMahw|ZB~zDo*B{r}y{Jy}x^Wk>E$kW*iCh;V`oTAUC_^RGUd?mw zht%7FBJ_*M2SR9W=|o`X7ScuWI^fdGNp!S_YJ+RxA!4Y^vZaF6f`&=?xFwObi^ILr zw)h;NC?${xp--ukdMC>|$8GxvZN`CSQ^t*XtV}qM{sy1ZGMBO%t6`mmLxIueOue46 z{7jw4dk}-PxtN6jDj_)#k}Ltu;=C~ssWjt{3N0KFV;^Ug0tz;a3eI5pvG&Z{PU$0| zwq{9o z(M@1AD6MKkA>c`Fb&$kvnw2`fN1$y?(F7PbG#s1+JTHNhSe3(vka^?Zi<@9g{z2Ckz!*TpJXCtsZxi4 zxK?KhuoWt!1Z6$9{^FlyQ|#S?wyUS!b6WD~-!mWO8;ZU;OS)W;PNiiSIPb>p4D%}rH11#;fDqH^|; ze0MtUAq1!=%Vh+Rm#etkh%oAQ=Htksb-c#S0i|VT=A#47XI*BCS!BVBK!J{9zvlsr zi1RtuerhyaXO%N4dVi@~;63RhTpwWWv-%^rL7wr?LjLe=sxEu#^#q?ygB{D$JOONq zLRthiA2b-nigI<0_3_Vr^6!R}Ya*pA0-++&kb-5eng|fXe9#%|55i$`L}d!&tvCaa z4JAti`RaetWcebFFdaiJM+sP=3Ekr)eA~Ehd*u}YDrmOl!!m(zn!L`n$A^x<><`Ae zrUG2U=8eMJhwn^m%q-BIw4pMSk19=>Z0eh`K;ZGR0GWjS4Vzy4pc?5@0^(;3o9#C> zD&&3zlGxVB+cW4e+!R`y8tn~#OmHl!?jT;88(fdOrAIqZ|GWKvo~a7>mI+JBjogLZ zJ+g^5dGK?w6Pkd)xA9;G%TuzK4!~((s*suN1G+U@!1fzWeo-W{(9^yHuGQ~9A`Xv-pEU}9>Rn_tF)3?%deqh>xm z;5MoRN8B7cqj-m?KO_qTssFfJce%Nl>4#56T<+h3T|9{r^(N1_&fgG$?LF;Fq58F- z5YXKj5nrFDoiFm*!q=K^7bI34AP5h$G~PLG3nwNX^)vxK$^?r+#CdhVVynEck}wvm za%-9gc?_c9?9Oy;&g*2V2V-j2KvPb-1f)>JmO_XnMwK9L=>YUrG%oAIo|<8OBh3w2 zrjaTu`J3uU=)j2+gWU6292KUU?CrC;sXN^hbt7S!Hg%Vt;? z1$1E75~9=Cj2cMRe5P755f3zxyiT^mOR+Gj=zvezKGWxA{B;9}#K=AGs=j5**<~S@ zf!$vh?t5lFeF62Y?2SF3_oAM?6AW2^EC_nYlL){cggY|gtFj#sX}y@N z=)K2QR|h-Zver+=L&YL_$O^7X)zHw;R$lOGJJoh0&BYJ^9%0O0$U9>sq3qanbc|Ys zFFpsiV!-7L3#k;sNce`T!nf{BRM=SSZ=k=3Hm(2!*U4-)A7sMrQ2=bY)T2KoWVRD7 z5LgPdZJB}0G<#vay+#V&-IzAg9+KArF1mp~+=9%T*`q{X z;61#~PrkkryagE-{}I zIo;*p8qrYjnNs*4YbqEBLZbe~ovh{^wc(Su?d^Wx5>@Pon@#Z?#$W*JK@*dDc;43^-rm74e4mW)wwaj9F4L3kTmER^Jq04Y)n;`MIRlmsU0y>Y*Q6Xf~*;?jyY8C<@Tja{_TM9 zN5XVMLAB|*wMaDN_DHhPvgXvmQDCshfl?oiBx**hkY9^~2y(H_qYhb~u2wC;D;hg@vRkHvM(-q9vnM`9hmG;weQ=d7pxIMqr2$;?!TCM;j z5SQZ#h^oIMA-{rL1L!C0;b941yevh_?sFhvZU%ru$MZwW??b={p*X>l={L+es{6~m zOk>Xx4t$O`69hzv_Z3#6tJ5KGGu!TfR%C|Ur4yqi!d6%E6li9KNAU< z{L;b>Qz#^EE;R7jm?hz^-oU{n&tDjypNI!XL5>BX>Cvs@TMO7HB}+(kC&1DY73zEr z!M;WtjI4gQ(-QLaqDqB_@oPM~1r%60)l5lrxoFCOUdB`ACVw)DYd{=ML*~8f*X!8h z%!25-SsTye^~e4F@ejZKNhGx~%X-3)q)**xPV~Ro9s=?)5Dab2$3e9+2SEX?avC#Q zLS*s9n7NQfp1SHsRT_zo(PW9$Vg zFXvK);srhSbJ1=9I7!V^`kt2;Gc~njF$yAqiZKR~SH8nS+shj_`#oM2$=vfA29bB| zT{c5O-9|c!Rpqx2#o)zZ@X__#UkEb20gEgP0jn=dg?ts{vPJ?!5C~w<@nTEO1IWW_ z10jqw6s5QrQIBh^>`BuDCFTV|;Ac|XV_5#Vt$$M4|NgJ97B0Y6Zp?i&YGPXHl!O0O zQEy6wcMl8?cqutHG1Gxd3AUZ=ks)=zS|}b(;Uhp>sHmH!BF1P7le#%r4kB(eEc5$s z$rmB&fl@@gnWowgqXgV+T_kgaH>%tw)vM&_?ez3{RFBTrI&3z>8d^d&!-MR3r~E&kG>4B&zvl!|w6!X=e=IjrqTyMU^jt z(6IqRqQT#9{`YHNCN%XtO&KY}ujAt%LSdfTSM^C%(0L@GlhoeCtv`?1mj&r?tWqk# zx&Zvd1;KOTdqhnCeLKH!0{Hu)7qrGK+dGqP`r_jB z+wD8bDCWw)bAx2?^xA0+DinXc^ZO?LdSNI}o>JAA{-Ys5*h&cqul?gUn=sM9hx-RL zQT+SqzO}g8g3y{dG~v=&gP;zFKdbiZ>$FfH>ixJnJ2;$*{3_F2_&dw0djoben@@2H z!M{6{*WZ_76P46n+UbS=uh0AKt(!2=wL@XBJ0uAQD@E?}kNIf+EJ?5c9I%TJ^7@%| z{dxc2ApKt}m4nOccASlD9a`q#`aXd0MES{-+tLkVqu&q0P}#S+VN}FK>90-s-@A{z z0sHMidl84Dh0&Ms@6Q#cMBF0MV1*E@KUr&&0C+wpeJfo$|9dU|JTl*I`2C<_rD4B@ zu|jWe{{BpGqu|$@gSr_0zh4*C$0=snHj4N(Uu^W}hIem)g{c3*?uh&Qrv8r?bHX6l zh0!f->b!<+AH3ql{e1ytD$%F2-_q{2EBt>un|NG*;Ie73dzJF-xaPjB6fsj4KV1`QIcMABQkM7?u7|et0>;SzOd*{f$m`@_; zEN@tc-I>TqE`57@7D_KBW)5mzIqX(JaliMNY!e>XuP3i^TG;=MmPJW$PM1#JI}CIF zfp>p@*e)&7$d9XcUvq{JO(?g6Ug^gvBouk!(e?%Wx=CMTN@~%kBH~IooTJSTg4o>$> z;dzICjk8>uun(BOLW}_(P%cf-<1V+*#xVxV;WEev?g~6S1mbN!T0<2kK3fV|Qx4wS z0##QyJt!zs<9q}%=UgDOxL;6w3;tEA?)VKbB>+@5^rh3P##WW1N^RR&%+#p31l+^? zNE408mTMJJBUsq&zDEur;tVmmNpc0SnxQH(Y8h-cwACM1rzH|gCLKCk0(+MD@gi>9 ztJj$LY)uX^zZYsR1|i_m{cMcS^7msj$8`tc`r}l74R~>t#d~qhUJ)MNf*h9#WQ}p4 zei&G5$7BlX7I1wbof_wl0OM-UM3cO<`kn``sAU5Uo)}(-(;{aOyhXz+yC5t0ehJWu zZop!g4c;KI;d9)BYr}xNF>LQHqq^)PKmrVE&u-a7p{13!(=j^@Fr{)Y*s)Eh7m#$1 zk&k0!um#-(WmPMRgzjws+)-0&=B)8N%G163L(%D{1FlPNS_)lxwo-vvY7LXFw!aEH zQGUxwjd+J{S5i;#zT@_1{`{O6S{r@_wRbgdSd0wz@##%`l1HiMoRQ{RE;EW}sqUVn zMkHbW&QjfZ{dVpAguKN`erMjk2WVjz%WB`X-{+OTS1s2{9W5p-cV1}2{AgHTLn7$@ zuvv#x$W#6Nc#G*^rJoS(IGRXs6d<~+aLup%fl7^Iz)V$Y^_|h>HW7y;c=A+WV^q-d zKW#>)S*OBFyV||kPU#COJ5t>k5RkTWIIlhl6&?U85(4i5GD~oFYzTl0MF7>)8C!H0 z8^eK@^)bMh?0wM7Q}Nov`vE1#Y9Jrztw?NczL4>%Jf+Z!Cn`WO*`ROducitr2+9P|o<=9fD>WmFagFU#i;Pd{yo9J)>R3`cM z1%Qoyp1itn(cS1vD|uj5JSK-zILLx`0h2L@=|_he_5 zr%)|hmICbrEAkZV#ZDPAAG4`49(Kn?*({4Z^^c1H+Fw8KjHQ!qpb7yr<^b1FysbiJ zLz#!zyklBNr<4)}g5D0$ZJ=JJQsR_t=f(sB+B|3kCMkj*s&MM{av%TN@aTn~a1*B= zo$`Zj5hlV;CB*>|FUHLiob8;^7Fa3%CxuRqxN-~D2Kf%d`=zd0%P(c@A$o6Rtp}q-?;#c{J9Ktpi>7>)ziYY*?5t$H|Mt+fb^gJNe>ZR zD>6Vvq>xTADI3|xi7$J!Krcu9I;$6Oi|fo*RM}2dmEaym^Fc1$g90w+=8SdfJvhX`Crd`{2(P9Vpd;~He=B1YbO0lHj%t0(}h9xkij)(55$Uj=dI zeAo6p+?@Lg;E8rzCN@UKW=O!oo-VUJQO7&9mgQ3%R!(FLkM4&T)68*Xb67gQL^CO8 z3>VBwy6%i+BVSDmT*YmS7Ix*mKQP;9v|dVmZu4IC`>NE7eP+rufr;^N&^S=!l&c`A zes3A7^ne>@^2ZfMuan3pnO=YY-=$D;zcF&)03YR+{?q;ff-66Fu_hAD>-v+KGKTdV z$dZVm>&v96$^pH&4=Iwg-!{u{^hk=gDUamGS^T$X2iU>+_il zXFeX>T`Z)aJDc%)ra>JNEVpjwJ05Ahe-UtBwMyG^=PkK8@Y2R&8P3&tFBhf~@g`h} ziOsgU0vuOH6chO&*B+!yQ-W!LaSMcw;GJ z0pIT~!`BLf8mI_!mr-4kAl=XKh!*K&XIe$8Z>F#7IN3z4{qb*02i(mVt?xrvO&q<7 zxH7*t9OKOm@>yvFsxD|!K3N&@7T-RE-zF2DO2KDM74&cfq9*U7uQ!Ss@2}-Y5p`%oqd@TZW5S5DD(B65zRd5(@&W0xsXe>KA^^981JQ<*;;b*` zZ+AzX zfhIaOu+@avOGTR$c=;V?+mNTMqiCh|a0>KUWQB~EnRkQd8n)4a%lW+LVONFiw5}=u zAaopRw<&J<@!^U+9!nGublXnR#U8BlaAJ6p;XBccpn?hX5h=jq=O!#q1c+p1-PQR9 z2dMP8kg42)oX(?uCm@gkk)Din*u8h#popM+|IrJ2?N{u;+b3`7n43)AtnV9ouMj@z z4w%3{Ke55Q_pqrWk{;*&eMTebWjUA4ggSN`83yYud`4>bwy{hKMz0Kyi7No`4ZV{< zh4;W^Dt` zyQ~!HK0=zW%6CpOi;1rs>F9Yq;v@nlP0c{T8f`veWf<%=LrR4}?hfodTbDE%y*D zOhmWj>y(B40J=1BAXXq6?UM&shkjQah8Mcm^_9=rLZry1l_%)ilde`uO(_$hBakF- zX|Ro@Q;rA7%38u8zbVrV_bZ_5r`3dqK?AH6#l~n+0_gKl046*eKq+mMD0-=n0cpQx zzV^Fy(?%~zx>e9FVTG|E;IjS(4v1^`>@S8e8`f#Zvl+3D$gA$dm8}91VIp%nyd2Nz@ffjKW zU3z5HW5nA5o2kmlWt4uo=+sInk~I)P?e$^n>{}w<_C`0orvQ~8FQLn`cyd1r>|%lw zfB*i@y@k6cS$;7ZvQ0Fg-RYF*a=?04^b0VfqJr>H|ADZxQsS@hzo{O5xX@to9{F)m zd-?dmOcjyT5774@jUHTD@@%(bq&Lm}0<=^VHF1(^)_b}z=vNgR3bmPY3LM@Tyj<>G zEm+=}ZEUOq!npDs?M+t0x$ls#fa3)nVIcDO)CL2hHw^uU!IE|d*#oqc#7(%=vKe&~ z(IZ)0P2Yr|R~H?s$AGGTT`t*Na6!oHIM)j}+c@d)X2ZZqW)d;lOw_pphcicQ*{|v! zRX-G*+Ft*8QLusa<>%UtKJCxF|Aq-*nb&Ele-abX z7G&&-deDL;4Vs;m=>5t;<->7nf}2XDv;{=Et()})p`W6^hinVEXKcJX#JPfidP(%7 zXU8K!2%C@n+dV+DK{)OF~CEg+exftt+WGsPRLx_aEs~F??U99jO$>vvsFo|i$ zh7i(rYu$DTB$<6lMIicZ5u~4na4wwy){Z|!cA{5qMWa<@s1b^_>y49V$n&ayl!fuB zkrB1;KHBWg#>OYD@{QZ<*P(0;54$LDiwK*(WtzB&igpZd=43qBotMaB#ziZdEw|Ph zJSee8ba|K_DRQOV7D>i-`gk>c#Y=Zg^36MN<1I!=B9T|}2Thm=& z0}%9Si#tgC1lY!GuxKioiSkha0u@2%m3}!p(7Rfyw&ObG@Iygf#y-(TvtQ{W-7sk@ zZzx8{yaK>~Yd;(F8}c@d_wx8zDLP{^W;b4ajqK=YhJx&eijm-{FoF20DZKaZyDMU+ zp{Lkt+${oT`rWQ}lbahWD0bHeD_@cMCE4$Bm<7+z7mccjaOQSCb^fyECGku7>Qv17|9h+HlDfqkd#+GPQ-^pd%5p68F$Ban|2g( zCOHTT$-W7mz}r!`WyrIj&$)B`VQy8kbG(#r+i#EP=TrOjDorVwwVrm<+}qtG=bNlk^tP9E=}OS&!sL1KDy(kU^S!4zu5ZOExcdCn$TKY_sh8 zRy6>dd8tZ)n^P#a`aW#)BDolN%;48~7oh3-V*^g02<6u@cn!zF zYb0MsdLo6_aWQy5P{t4#*K9HWo6D0bALgIk`C`E1J|^>s742l%2B~;gEYiyN5j;_? z|7dS6>AVVWKrTAq=6DNMVb>E0P;SNJhpyy$+%)TCB|Eymc;vK<46qgxX>zw2q@ld& zYjvtm3B3t%3d?QzVk5MEvS_dHPS(oF&{<|E{i-OQH0(3wEtradL1pYp_t4~=CPVPs zKVCQaqn^Ms%{&;qiF?xX>ZDOZ#hBI(4dEB&P-8?wo9Oujl zd%%_gx|#Btzhv6DyQT8G$Pcjn!nNyC5snRl>|f6YfQ5Z@{ItCe2Z=Ooaj83j5Up;6 z;QoE9=r|B^g<6{%H7$^&O+RE7ehd1vs#td~MNgK4d|wcFe#MV?dNUJ|S@*LnT>b{< z458N~^d(3d8FrlXFtD-t4S9Mn`YAX#L#IJqHY`8am>9_`Enplrvl(Y+J4F)M2>hpO zxpiSA3&P>fi`@^<|6%JZfTDcAe*uZ5m+n?U1OZ9u20@YjSD7)|byw7var_Ory$d}r@TQDQ-he1mM=;@+( zk&e1gLtc~|efdYt{^kz$=QX5T*LI((heIhGW$GH+#@bkOIXpXWsxXG*ff!i0zbY(}+IxbTA-iDU&)L<00bHvvB=ab_6ta3$TWZuD-~L(M381!CdMLk0U1 zCi}E=(M(`>M!Ix=f1@F4>e$ZV1cJ;#xE;$PylOTY2Av9}%hNpsK`jojn+_++)|Rw-vY(q02Smwsrc=u}*c zPy?tz&GQs3xKK6@7|Aeu2;B8m!xhKarvW(RdCH-ofz@31>oANbSYN6<017B- zt;UtqdvG6Q9H?}%NEWt3br3oKQfcEkNTb0rpp2YvYq+NXs%4JdAGO2LsT56CN*(xw zI!~e^UzoZl=(pJCA8{teEq%MiayhUKNs!B&F@w zH;W$W;^Z9Jm2&o1DRGXtugo&L{TtD|xM1_OLnB}!L3SRS+t%{Op&+OA$R4rw`TF9W zDJ$_mlb*@wn>iaB(3D-|7`pNt#HMdZL)x0YC)8;K1NQY2HVgcQ-yH9ubnOKaYj~i{ zOy<%P6`ddH&0qq%6tODbaF}_+_xIqX>Ou@c!s(mg{OmV zlKs+SqF4?zND*kj?!vPiP=SMg#u80z#dS8Cy@Km>2U=U0DtDi4rCp)epu(~zn3+xQ z>oJB^H=rri4)aiV>rL*HJv)MNhBZG)+P3Gc8{N7sD1jM%|5bc+-2F?6fXL?0H2eW9 zwsJ|q8~ zY`MoRdOaf@1*j-Y1pRJ1+z=<#@NMLsVctfo!^FVpbd0i9N}+yPCVe>2#slQk9Iw_v zC)IgL{tIXt$4V~@giQLM`{q5oMFD*i|0{m)C64bt6VRE3H?9wa9eQg6+7rJJpL&DM z=tW~XTEYdw1GhpxYUqK$j8pG^;y~1-T`i6-T+ruci0rptf!evZlyt4TUhc-~SR0Z@ zP-8|M6Vq46vv>3=7fWA7mqR3hPk#F<(Nxzsn(*f6U+cW1;+TY9=)WRbni=%o4$Kqt z@N}r_MkFO@jmA;Ea~Kc$Mcb-iMAc>V>724}rz9=8mG@q|K#fkx z@BGv~3WuPRSb~B!d1F1igUBQGHV0-#OU!CcP<&oH-hN^TJ_Y_6Yd(AfvG5iOCs*p* z7jM53LeHM0;{sV~e(u0B-N8g6N0!W4-tZj7X_xAe2SQAWrI~c~))~y-&JiW}a{$VE64g;PZNj z#9}0M4uV;oGmhn1lJqFoU$iUWn4-{RUH4|j zHJ-!15#@=A(YC_TfO4Pm~>dODvrsn$`T3`j(2Pa;p8rrVkdA zXT6#lZ61qRG^CRt5aqSpy`EPBLE`iI8TWjD{_}ptP_4Ai^i!9whX5*$vL} zhC<`oQdS{JBI9ZE3YMCp0geM`84d7{@2p)nx}EydLW`YRdHKQQ+a)+iylQ&;V5(;o zvc-+hmq)S1Qq2HNaJuvP%S6}U5L2#RMjUUneNMrqfXfE*@~O;{N9*>Zn_gb4^%HY5 z0je!?h{O^Dh4=S+i#i+aGER*zdHzzoVUB)KSOZn@@AoFCI1L2Jx2lkI9|$P0^Y*2o zPs!;b?DCY|KD*|^8Bw3w019d%RMjwx_a`mR$V8 z_Z^DQB1PXym8KFu^2@il5K`XY0y~6}hWYZLh3y(>?b35ljO5*T9AVD!J@ymK(!@5# zSYBN54chhdd`gN1is~1ioBjA2#YGc}=2UuGeI+ijsy_wxUPj4x-$Rixl!2V~MpfCg z?5I%qZ9?o|FBv~kS=^n6R|%Ky=oVKoJ?tXnHi-JR*e#SYNn_@dqeP5 zUBkR)Qt5-1WxQsG^Mg53ssX%IVg`{a6s0IZotVV~JU;Jj`3_Kd(B=iw)Ol{@pdXLN zVtzcTpA5bbQsk|c#Lc9Ip2Bw81LXqKtEANLpr>7E3ne5Owy{8;$DHMUyX3;3u|lI9 zX0Y1jZN1Om-2jPo;OIroI~4mC*D>^GnOmIxn@+UF(fdvb*QJp68#TFDDx1MgE z_03(k8_Zu>kdfS87e?$}z$HR@?}aHHFZS0Xm&gQKv;@N`t^vw5^j5cb;IhGFJAOJs zny2#3Ik#_pZG4t8@k^)k(j=e^-Wpn?NaOEJNaq!9&h(|UyPIJ>ND$uXxnx+gta13> zpXTsvRr*sg_=1k1g|@; zRY1dU3{;tw#(;}8H0i4Gkfbc;B72(ogRo=KQtaeTEhl#?@GJ{(t_+4>T98?7$ zK?xa%Ks!s(CUUi|4jFU>2smz0X$7qXz!tgQ8*}xe=ci^n%|mk%(UT(7eu}$415lvv z)4C80$xM{IMmqQMq}h1~U{hC><#bv2gTq_*?V6img*}s08oGock08VJSmzv=T%X=_ z_FC^kT)K(|Is>{WY=73&-|r=(e)h0q_Qs>-1$T|OELw0q!NWlx=!|u`F#4H&!R#KkbnJ=(Zi!F$!?dXB8{tC|2G)rTB!+bz4Xnz0PZsz;Ko5&#G--ErWW0_lPts9% z2mDcVPCnp>$J+EVfGXW;rx8DkuY}Vordj^lZI@Fc-dtdUm;8kRlTxx8UIsyfK<@*Hzx*cmAV~i z?HbG5a^2rdJh7}MKN1@C(+rjvRw}z-1 z@n|0DvEshm---y2>pw8a4-*0VAP1^J+I2Pr8wZCjS75CBC2!itE8&9xYl=3BE>p36 zSj{}~$`tG~$Ln+D8Ee=2_{h3-pC%+aT$aT742O`~AS7C4h-I4{+gB2Fn=hvPj&Flz zI9GmXuKoh`b`r0#9_k*}`n@I}!*Z!UFf=3;#`qs`v8*rd~bCA@zqiW#9df_?U1$WN$KC;B!kA@p1~I^r z78ZH|g*%c*CW|dfIJ4Na)g_aApjgGq4mIP7H)$u4^C=WLDs*p|$1ik*SqNaZEvB_k zTNMXD*(BOIk;BAutHttmqp#y#^U1feo7fh|$>lsva#9H73~kUcJ9n?M`G81d@Z--{XKXW27rfu zd6odenEudHV?G*W#{mB*??0ts8nXLES^SBZMJAf3zX8D6nc+v(Yw6)1rfxzdvv4_Y zo*%ubvp&K?0~1%1PiUm7SG$4>Z1H>76*!#P9=L-sB|6LaM<=0ih*_iO_G|hnKy`J+ z?s?R)nUpXuj!d>HjEXXt8r1i&y0B6TEMf%fXGyi40VmJl}gt$uZ?3^>|)@$Pf0X)*@S3M)4Ml+O2#xzOBW^Y zj+H*WbsFMeRvmhU^AqoSIpYhZL$0v_`|x6xhik8|Q#+*{#k^;_%P=!GGCt6wINDSc z?b$Tb)4axMa@KpqJ!Q`$(nX;wAu@&-i7mPnGAEGq%*FKeb&>IbjnzYvQ`QfK<(^t8 z9QQJ%d`(;_{pP;(5i!T^|}Yj zyvVG%siQIC+M}g!C2f6H4C(%7hbo@0vLxKU<$Azjy#P5gG#)|VlI2y;l5XtjTmlLH zlbXoFhWxR~HwRKTz#rfigF4s3pYj?{F}v@iGIWKSq0Ne!ZJ~nnv^98!qh$J64J?_% zA9hY>IPA+jwr0_x5(@()o4$(bvIbt27II1;3Q6pqtguqq=H(zRMV)9ozxXWwR_b84 z`^5o}-2Wb{F0SArpEB-$ct5NO*@0oquh>}$E$(}Vk!!oYB$J1<#XqM%#vznoo^s70 z(UytLxAdYKq89mQFy$PPD`CjMP7h)n*>Vhyw-v@CyNRpnG~ccWK(|f&{DwNo@BZrM zH$)RxyFPlhtF_J~3%p1y)>L?QQfl;jBT1GiJ@MLykK2~wcyB(S(?+Ygm}?P2)^Ew&CsjPt%zMh;obktobkZ2gv2koO^_asI;Zsoy~Pw8H;0cjwzdk* zkVQGt(|dQNfW*WMTH0q*%cK?quFJ-l(@nhXdl;j4@S4@{7DS$q&OvEsBz<;Zb%_U- zG&wCYD6q!^4yR%Hf4o!y8~5 zdxMI{fM2cR47xsYdiuAYc9#i`(E|S?N%O!!mKSWaUX~=!N!x%4wjIvwY}gw_t>G&hBflquRnVkUL6wp=xJV#*Kn*ZCe6RH05n0E zJUlmlU$vzKRLo&YH|yLuc&|v#7=( z7Vzq@wiaXMgFdP^b5={};|c?dl=)ZkJ-*uK#Nt&W0os*!E_jwzFUr#6DO<>pWY_1a zBleNBz*pzU53cym;)u45lR_ek^x-_k!ZWI*X$*l@$5P6@0lv;+!s8E}y%4u(WMVuH z%(yjlkIEvAyTp{HFIor+pWc&BaG!GEgvbkhzTX-dV)i2^1XR2QpXBaMi+;-uCGx*= z#Nc_vjv-}s)vC#+(uc7oxFVF{d#^xvuV~*_d(+JR*n6ZB(p1`UrpG?B+cfG=&nNG- zlIibFpGo~Je_EupuYb)9q8v6V_eHTcu3PsQL=PC!jGi}m?XHjr<7qLn{ahFJ#k6)xfly53TAopW#ROd^{fjO3 zNI5{UhAbv(YE7)R+BViL#`o4Hsyi%=D0g{XK1|>=jnAYAS_MT;=0=^w@*KF<-~9MH z;E6>RA=j@ROjm=8)2%?ghVjxW>EwF5JFCAPD&2kX$NUMS_+PODA=W~bQUl%T1tB^A z*tMS@4NJ}H?Rj=W_oZv>c1XK(dF4st6~`?7{^}|}1hw3D(z0J3R)6ojPyMs#*^y{z zDE-7DfJguazKGOyVDK!xgYs92e)I75wCF>ooY*k#PG-CJeKE7ue#%s_YD^45}0m$PWc*mqMAG@>az ziXU9sbuO2VRivcZNm3sku_q?`uroSfpR^^792rVF*BvR&gUkmMcDYmo-OZ9Zp2Za@ zT@?ADcNIQ3*wD&XBwaDPjdk7T#YdwEzZ8jQN8eI7#?b&AbD#p=)1pmm3E@p2~9&ax~ zO2v7O#Xo|x=}Unoo&>6IHZBb1nkq*AYC2?b5*TITwVxMv%ci9+I6knZJTcwsjlX&Q zJ`t-7x}0;N4+LngN0|4|T6KT#ibuARY^_K`IG5SSmyaaNo0e7}8=&;^#_Sm~$nX5@ z6ia~d_!vZ=W0u&Xl{Ay5h@xW%V);qgOV&B%80(dtx~eQ1(}54vx6W`yzd7tP)W*ci z>(YKF2A6N8WSeno$<(n0#VDts5Lky;QHa<@qk|VEX?j-^&il{qiF7qR`fPIZou_rv z;hn2#`>DSnU7Ex+G+;lV!s`Hefspd9U`tci!Q5xEG;s@^=+M)_g}177>roVnz6M~( zRtal)57%ZZqUHCCR*RgyY9+q}Cmii6#T&ohHHzhWG6#aLqNjOPzwiRzc8}hW{H}f> z+s{YydkvpI3`A^Be9TjxTk*x5KwyT0w;rjEJ(z#friv);-yRJ`MGRxtnL^{{3dirC zd=V#XOLvNAU1kXzAKhf?nm1Rx%@5&G0odtktWI@1YG1mj*={^qcx*o|U>Ds6Hfc}Q zd+%%=^43qs-&?C6ZuiMpAE0I)LEu2tfChlmtbfXks=0qnzO(yo6sD$7=)E?xqSihe z4Db*AH~}l8KM?Ef#F#-cdlWxnEDqerw^mJ{L+7PbW2&yR-wx9?`jEK$=+H6(C!~Pz zfm$0jet{f^@{@9mc0%%UcBDafh34YoN{{zcx`j=h>U<9E(nMNNqD`F*rC&DC20Fr=(O!-9XGp2|i84sn9aJujAh7Ri5b zNc3L(&&&LUtXCG%s}EC(Orp*qJu#~z*ft@ZO`iAaI}Ph!qbb>dN(8I+yRxk213MKj z-s_0mpTAXRKiy0pj`=>8Q@$ybxG|+^>BK@GQUIONN7-()BEtmIvuMQ{_PV0!IG{Wg z{S#k4`FLb(bt>0|=ib5L;Z2wrG1P|rWD88XJs{Y~ho*>H3er}-eJIr_dY;Io)%M{5 z`x#k)yKB3<(c8H~YI2`#<=gWkZM&<+!aSKW_vCUuu$)UXq*p{CY}t<2tJiL9+gdg) zrkdrvs%bX*vM*L!YxWXN14?266{BUb$F_{eH0H36&3f>Fp!>4mo3nl2fsGy>S_kY* zDR26@8jTFZpu9Wo=k(umx&{sJ$4oN}7+>;Rw9(dn#YLAo@M@vJNBdjKRmb^)n>k6c zto|y*l`z<4ND{Cv{*8CIi2AEpKG13*ECnQ74FI|Vh-`QSfMKvaco9Jfb?vz#;oVZ> zFrtHn!>kMdmzoJ_UL#{!$nc&7kDNh%&#pS0XK{`{-*aoRZ;68l>-j6*obIcO z#D>ieo;7wDep7(RfXdr0x1djac8nM?D^$Hgnn4`?FW<@lhi1`O@j!96(SOzl)Q7jB zJ~Woe9Az>AOQAIctZ~0P1399u4fL)0nJD0mgv9yozK-TGC~x@?OBm<-#1yQqpOda8 zFQ00F*a?)Xm#=8&Yg!3P);| zYP$2xhTMfDkcNEHO2+dK<*Sr3&&A}j%b`(8Pl3w-?*Qv0vSri${F`B0+otmr7iYDS zSJwBC$9dgqA4d(yO+VOFL1 zk+!;{=fCgff8I}*qFo59FI;CnpsA+^7U#e&P&YzSnjBCzm*A^;O;0pM#~PS|4UBFy@fS!fVVrn$NC3{aKfr9k8|Q?q}p?; z?Kb%f7~jLP`+lfeqCch6m3p_UivN^h$bw{8=JAR~&}M5{!=Bz6Nmz+QsH_)@%%#QT z;ZG+fo*x26u4tlFQ-3wI|LbQ!7g@Q+>Fx)+jw#IcC_jL<(A~Kw)Pk2R+CmcATKsnL zNSTFt60hpt(au-3Fj;+;s}#jeigscDHiUAT5}$xT+u{W9Rr~pIwU+iKu=LCk@_@z==7dKtjqrjj$#+o{(Jb~dMEF5mtq+a*3A z4asHr`gzqk>h7W6uUmFoa{5dDNsu$G2l(N+$nU_wFe)Tk6Kk@VX1h zxk3N4GIQn3dpn1NmL0_YmtV9Y99*D{ONMV#=X;WRQ^?xB@2`$%Fd^$7FMcU%*qRXr zg@SKhdZujQDR|(QlV`qxjUgHEL;}Slh~J>yhY$@Oz+6jQW17HzWCS)I4y^Z|&5h5{oPeuODWMzje$T{HLyQK7p|Wp$Y%POn~R zK!J-+*E1Csy#v9Xuy+6ebC&vEbprQxQlbCiP{sK}{snEUEMH+|Z<5!Kk2P;a&p4{4y~pFbA8Y`9*Yms;>IcikS2O85{AyFawvWG}x$ikTLk=_#D_{G}XfJIezK z?*LQry8EB=&L%et?k-iS%0jHiRZzB(pR7x}ZU;c)Ha(b#{-{SNl4eRS?S1#i;Mdhp;AzGul_;w zKI|gOW8>uTA0>lIywDC#vYzqRU5EeK3hJcjQT%j3~uDJ#zA?iu`NVVy5M z%E8}+IO`N1&r^w;hb8$Oa-Rv^<~cKJ^Z&I>_s zS^7J%@jqWqR+2LSXB^G2Hd(ETv3k6@m8$KVd=2V=ND=)!W#UUAtJYs^c)t7sFItbh z&3c z=yvZzOsV~VH1$#6D`9mdwaS&3=rF7Q^@8FXAhWM|^NQ#iCrTEuVqK40kBB3S%ARM7T4AoNM~ zC)09od44xRWtUI<1T(_0pu&AY$G}jfAvDngM!yj6mkHMB;N$8y$*nfngly$W)c?=6 z*pLM7a%bQ7=`3f5^1?n}QsGq7jK*#GTc{%A5GfoF zbc?h*3=EDz?FM6>lOWKtSk*u_NT>($@FYwMPtn^Z>lx(xe4kS?4S|5g(|z4f{D|qy zhaNBM8=}$PdGG9W z66kjw@TaZQuC1}$cy!TZX_RyuPIT@kwy}&i+9tyF2SFP#?3aq5A_(3mXfT{k3TXfq zRUP2Ew&56_3prOn_{cG8MR^ z;(u4FHrD9U-C9DLYs)w1h%|B`4U;>Lo#T6AAvja(WS}-oI)>RNS23Aqxa!?jUrRpu zv((224b@9nq_6D9?pWqv9JQaJY%f^kvn|UNhUpXSEk>8NZWn<*dqs9^>K zhgzuR_%%F7v!EIu{f4pK>O*$j?NbEVkKy)v3hBqt2n@}J0n>eTa~$n~`N{{7rCN(% zI4AK)KrkKj3lxowU{4e`Nb9)SvWq=k^gw;6OSL>3_nZYSURKI#Y0CvtscSd*iA5Y7 z4m9_MRB9pb``TK@y?+$!AA)3qafydA*SyT2g!}$N(wi1jMn=YNs4#(kcWbU|{m57) zi=Dlo73EHTwup8xgm9u$3gvc$)6tswf4?RbAlzbdo96}vYy)Df*@Ugj;{5#lzYsev z@!;wpb|!kE@?vb+VC&}Eu!DJCp})j+b!x|bSVD5D)|lVZ2O{Y_q+ zmBLi~(&m1XwrR;yfT4+FB@ChAHGkEIrbguCArS$=bMSlKg!(voeALnb%IcM4QR>Rs zeYM$7d(GEdbALdYR>*Z1EoGd#yLCD&-jg(tlYy@|-L~&t6^irzM~(ZYX?1oD94SG~ zEanHMO+dN{vnZ}}1BB6XJVn;N@2U7j9Q;^gv$#|BOimnc&!(N?nw`xdIIdPb->mXM zjF(^G_oBO;;q1=?2{WbGOW5q`J*dGa4t;azB9X!n#|dviT1of)gI`~>WXyOkesV7d z3AX|?Iyw3?|29AUtM#)C4LJ2L=v3QxOhngol4&h)n#bU$wD&QjKD~^zq&ODiga9LO zZ5KC;gY&P@w9_K1A84hw{7Fa5X+QX~P}&r1DqHeoe1*tNa<;-`f{My#Q-HKbfi%S0{Pkl zqPDmn;hs6S2>Y3`daN4z8|a`=ggh8V&3=vKf++th{~Rok>0!P_UE?Ot^;FS5P_~%w zVSDt}1O?$wa}>7dl!7TV!1rM%!eV2b6{W^}vNo!92Gdr4i4aZIqSYZh?EDW6E|pJH z`dTIw?#}8X)?w?a@BydGig%OlFc+Ujx>!!ln!Fs3q%fbN67$B~G5pi>t$mCptGW8? zXEprBaFFXR9~XXFg0qDX+x^7X!UKYVo?&R*z`LX zxqo0sbgnW6FpXo~7K?9k+OEp7gP`>+2Spyp4=3cMKu&2>$u`!Pb4vNua)@vjykO2_ z$V#`1IO=Zi6-A8HjSN7-WZ0U4U^GQ$CGr=Wh`eHm$`IrnM8~Jm@|nsSF;KI#SmEKm zMqd8PESPeE7V1W|B=mVyfsnOUoMUm+%J!C7U02Sd(__P|1rN7e!>6t#hFno1zT7;D ziuRJr)E(wrD=L@~wdm5jr5%4-;$lOA1uD8PRq#zm^cSuCccD)yg;c&6eQ?S7^I$~D zcVdodfpv5rMhi)w?%XvQ6Y^M|^A@k^k~2f5q3!d?maPn`f!e0SMTR2ib;NQe9t7*9 zYJKhSVY$WHZ8d{+R?~uI=9|Fx3c%CXMxzdsHJxuIghxPY^A)*Uk)#3NVd$kPS5&uQ z%2YI+;JaewRrDf*eExBTKujtt6VJl2@WMW>Jk=;D>w+V2QOmy!q?=*&maFO#t-l? zLwZMgJVV}$gcWamW8Tb(;0|>MX2dmS@9D;`BFXV7h#McChnA@Tu+^f;3IW{wSb1OC z{zf;y1ap%YG05>dYxiYUk`Pe^(QzEQg7=In!T zP#>$uOZQUADGG7TveB05t<2F5#F*bR>Cd70 z(TKOmq`Id0cG~tXJC@Fh_dxsfUr9{>_WMK(s$;{*G_Bq7FP_K_u-riMFmoif$94IB znMW7Uhar>1k_Mx`2H;pSJb(EgSh6nho5~U( znaD0G;o#vN@C46d9|o3?v#^v!SNNT5sT5ocBJb8q&0Q@stn9px=`8n)kW|Yd_CA+{ zd5COB)9X$tc(i250%oZ)N6<#`S0d3$TZUO=fywoHw&OJyl%x`;53YrbNF@G51&ri2 zZss7|^NEy&Fu8_u`oq~;A;2JMrCzf(dBFGn{ru@#jA%0_oS6*^B&LEmE!`-A{OaoV z!JS&&mDM8;eOy7G5q8U$oRr7542P>**RbT&u6alLN?tf^Tz^|DJ@$Q*oy&b~l&qdI zXg?QNzbhTm4aYej!UID%?L>Wp6VRSYTJE!T`4cR9uHn4yTZ7Nl-G@K zkWE9wCew7OQ6%l8W!%L}Wy}=&7pDoak(UkBjdCn=Y-_o973Ad;uV3&l`{B~X|5k3< zvLr?2*kB&|fhGbsSVu-b>)eX}2!%hV@3HqNXm^n%lFtA;=d0LQ5g8jF_Hmig`p0T)4}W+6Y&Y$={Z7h{eI6nP)P_78Z1||}iO(J#OiN$6 z_d0!NW52MLGnD%S;L8<*;FMcP`3_=O7fJJu+14l&i zpvtx&M7m~zqd_Y1K(nshf$7HmnS|J8npGRp2HI+E7XNV6Sg>fen=2!(I9k>*V?RLJ zhE#YKggP-ozBUV=)=;mAJ83dP(Y?(4)zl}s9bHr&z9hkpV_QHdZ8NhQ*XLU-5VG-o zk~uTRsEBlqHDtfmYb)?lG+jR*c7iGLB}*i}3Kp~Gyh2gnR`xxvB;@n+AI8pZPZ<8$ zqx+Bn6`Uf*t^33cePq%ymGWc8$01nyDu@7W6aR4gV4wEkDUJBa0LW@7DUTw80ZL?&45LGC(*|?v1)T9u? z;L~g`SB0A)PB@jbLA$%M=}}b;fv`zNsQy@tXFHwThPftF`TVN2To%;&CL zOdsQ_uO`7V@{=u%nqSC3HI-spK)9sjs0D36$77FqfDwD&+3T)Z%`g{fyo*9TP|CyT zXr^{=*xiu=PDNmwZLX5is&bJnjB~an@IumvRNF~#s9m*JQrIds`d9Al5Uw(M^^9>?emb0O=TOl8O!HOsFH+AnM5QB0B6%yVe!O}H~ zI@Ni7E0sqfAi~h7N}i!hPw4<0j5ujmXJh5ZlJE1O>5rBF zkW*ve+GFFt79f`{LOnEj^kMN1t=~}B{$Vd)^Sh?+76F)ficE(@xW1|%%b~&7xxH1I zcL&AtFZB3dtS#y~Tz2oD7iL`0*S&=qb3S8)E!BxeaJP+eLjs`c#_`8rZtzUDYGOt? zAUg0Ds1prS*-(DNYVskx1vHKiG#{k>yotdaf}lAD-YBg-p!UO_h>*q@rMZcq+ltQJ zBIn13OfZ2(?@FPoCJdcg2z4%F{ze69alGGF%MT4SRSrzK+Sm}k2^YSiBLKG0dDkd| zTB?xylU1Kl;@7d&n2U1)o$jf0(biJ)>=-8<9ZBxlGJFh%w3drMY_I$f=W5>mCb4*> z+-=V@bO(1V+XBKf?ob`u?!YWxfuy4coIL2wJ|GK;4kvXv0~#$tEwEYZm5hutvrS-;0;9vbpzvU-52kxZ*Zc zc6$nFjOh+S_L&qEq;!%V;XgMtX87j=Y6e@gZaKcd9#qF*et6&=m@eaQ8k(ST$GzCRg3;8@4qtMf(euly zcY>fPq2#Zb-y65Gxx)9Q<+l~4=?bZi#X6_5_14;_J2Ne(=)+jlJZXtOTm^zx*ja9c z+y-mY!(-PWkK5a=ItR0qt+qli@J-RnDtUpt65yjW^_MdLQeUiTh~)^w147b59VK} zG^x+m)(}u<#V6AT;ar(7uOXnE0l@$6WC1G37eMI+nCVojcT-on(A$I9GtYbth_=d} zty9=B@&DK+^B4}!XnvPS1~#Thq%|?pH{8RMEgTsjDk0*H6?z1%e9_<&R#8`@S+!CV zk3qYey$Fn5b4TnxpOVoj&6*vQ5{7uIFBo_usV==*S+hzl}y* z8{6}Yqxh7*w52;Py|@>73Y*Ah^n7ZxH_FWKP)P|zFZwXO%`{(?Y_};pE3nu*zd5jl z3#lBWnio8TzkEU29olg1O|H_KF0cRRT4tcH<(;FoH1v&hM3}P?vb~zQ8L4zRBs+xj zn!KhwlA3;o!wJW2`q}fu#QZ<{W#ISqJXdhleP1qvW{;sP?tkHoP}B+y@LHmZnj|!CG0BwQ<@8%xA z1&{XP)TOp9W2$3*m%%VXEiElOudXyELxO`}{XK46(_oZ-_;JOLj0kCnjddBAk*eMp z*0wE?05v|b7?ih2rFPN)$;O1CP2zS`y)HP_zy71td%{kxX&v$|+G z`QHmD`^hw@8;w-%*OqQVJ4cisjMwt>Re$U2m3OPa zo{QoR>xneGHQR0rfNFez{1;yLa~q=M`n_C!T@(3g4U0HO)AH?m+X*UxX+a=~BT zOW@?w831zmil(WIY*GfG1n?^jqjk;EKHt4GT;FK?_;-*WiYIV;iG2QvPjp2G{c2{X z^tBsbvuwOA9ANdIsj>2N8OXP@=iAzTpu9r=TEzK*Tt}0r3{x!Np{(PGmO0(1>UVt~ z>s0(6MC|3eJz=3v&HiNHpB)H^b|xgulD2xGgDe(T&SCIk_75?(4E!6=C3Y;z(JZhhwXFZqqmr9m&&bc~w4w zV@-6?LU(2EV--MtxJ0zm@8yY|6rRxf@k|Hub_igWjJlfy>z~A6P`6dEmdaSgvdCQ1 z(~wb-MEaQ$MkhGpB;cTKJOtXF6%1D+Me;IdXpX`i@telmMjBLgC`_p;; zLZwD@UKtR5++YWxR>Y7U;tg33Cu5xa4Sk-o9E&Xt?uALNgNNakEXqM1Q=QI{fI%RL z22f0K?(T}50YQvf%}vJ}5}q3$mlEZI-z)`BDMX9~S4kS7zGa!V&qHIX@3ps9G|6Y) z68Kfm92M?Pk*^pmS0Q+Tc--3)WR(9fG1Hz{5;YIwlX}lyLWt2#ACqX=3TWYhk$J}+ z-^V{nLVWdd2e3I>-$e#dKVBLRSF4|DR&0`A)?)|tsVJ7gXiyMSv~ojOLxi#SIi3hpE+d#Cf2Dp)G(dPi^VviG zy_3V`TcncS*+@lG^$=3`%c$uN+)Xd8{hYO6d6w-@KIsdUPlv8in?Zw}bIqz^kd8vd z2_=btAc5)8Y}e5w?A$1LO`Jt)g_S9yg!SYaQ5?(-cy~<>AFD%Jn$(SU64LswhCW+w zEM-)vaE>v@QV5PdIfWxi`^q2S$nxt{ynirultw>u`)5CSpNv7|VCKCpT<~cm(q}aS zWhi6SNmM^zhq@EB$=y&qo52vd3>fI!@scUgy2fZ)!c~#$k4f8czg+12#2*lxfF(kU zH~J^tniY@=hRVh`K2> zB++4px8BTo~l_Qop_5+nJW4>+fCrhJYxF+gG;r=?G3s(td~+_n;rc zi}6L+*ElctY~_Uy*VvufA6o6{*!{|{&Y5@SWmA$DtajF?e`7gCrS=XAr0HbrL2dDR zEF3~V)E`%s@`w=0Z91x*)rV7ilt({Y@9)$-B+UG?5mFx#h|>^ivPJO2S8U9<@d<6% z6xpRrPRu3j>`^pV^i~MW(F+U9Ya||5Hl~`V&}}_cr6aHGja;f@)S!wHiH65&9;r`_ zva<54!{_LkHSjQ%Kh4UGR>d~5%bIPAxxfV^*3~zOtYSFKI&0$`9goppW||(eSb(=J z?~VSG#4AjfdJHA_x$2=D#fhAQ!y`?1KpDu)g~~o@3a))7{K4`TRty4{{EorShej6B z$1XBQ0pZOhEfl72UcLWjHqyK7HbJsi=Sw^I^RCg{FTiJYg|6n)wz!MtlCW~e0H&3} z$h;dtDFS?u!}hhL-AXxYn=ld&77rL&pA;yR%QAiA2Kg7lsn4`;8bvJ1F$$!K$WV7I zoCy2y!$tvai-(K?*+vdlDq26jX=&act`4zB^Z-wkV0-O*4;hDd{*AhqCBV9QGkKv* z#*d6^CyrS=<2p@~5~iQ9Tp#EoPmfgYFONq+A8q-fLH8a$1i z(NTkflQ4lmj=)*8{!&fL51b2B!gy+~4HEj$4*eg(-U2A@bTCyZ^ec`-)#koBbAk z^z-EcPKZ$dp#U-2rN?GIKDGIaH-u2WJYBOVn;kuEki3e1QXe#cs*^2tC|84dRKo95 zBE5cUroEg)T6KHkv;Uu106)JoI&HV19yo7$NPB+htL6X7DP?Er@rBQ>Ux`)6%`Vb$ zvhf^yY8fk`rY4ESYM}P2Lr0-cBQk5+q{5}_*w!u#cmA|0xT1VICZ&Tg&%5NbzgBzq z=Vg~1G6sUbzNRq@i{>I<(}DEM0BIQvP*e>#LML32@~*LPrs^P@BlqI*=*eP6*>a%&%Q@nj%W=FdV^FQ^^;oL`Uymr7)$7KFNOlEgic zHY#6YLFp&I*l~F{1}K853%^+pkBe5 z@$={X9m*+~CnjOHUaDH$sQYz0ucWnrK2uQh_foZ~<=R;{(6C)ee}P1({s-Mgp0DP; z>!@EYJdox1w)gNK`-xb-z^Emiwk7%tsY)jr>V5rn?*8NlLmpLopIY+w*HUK7W1w#g!O=k$N|~Xk!zPW=bOfR zQ@+FYy4fdBoyWTVav%r-CmB7mSlN~muU*b+409K&rR$?^y4k!%@N^Hes6-QgWQjs4 z0HjUSr!g=kErY-non;n%J7oQn+7J3yp8&661-g4K;0#6j^&pk@9@21jy!@xbYDpD1b?-Kj-CZHq2HooIS`%uQ^Xq0T|I+yJ+n^%Fryn+v z&AcBYMEccE#miGQ(#9Sq36Chka;#rzE$4(?T{{N}5)cIhk^jJ2yF!51o|6iiB;gvj z4!5t1iU^)a7q|@G)u_5_Pf*#j;KCgU01Rf;<5nu~+bWr2MN$h=EYaOM)0F!lBhe0f zmzg)qAeRVbfmR-!u-h_c$&6p{`d%;+e%?WYitW(`gX}Cm+i|rH5NXq7`JK+P>)Z>} zQ0+FqU{ksM`va;=R0X;(-97Da+A7cYGJ*NRDl(8K;SjiB3g!5=2Z^zjQ8s&#aOwv+L%@Xyy= zqXE`H?u3Lf_fVTS^-;8)VWkE>^>?P2owXai^~WYGCS`Y@;R?Gy+ip_%vqYDP1LT+9 z-bqFG#>VoXN41x&dw9g?crC*uru{1Z&|MV*5=s@Sy}O+c-n&9CFTa;FxKh@`#2`E} zG7&`NJ7JqM()hh}lnHP^658;%#i1nUUqb6&akw`(M=uRXW|-Yrw=kPx1!N+L45?nd zPu1qo@CxmiJlJi$&)1iGG7Vkk{9b1nG)yckt`dD5wFUZa6kM#>>PDSMCIV}@klK0t zo7z1`>EE9gzUng5gyFf6Vm+`Y99ghY7rn%{L?$({vrL(xs@Z_%9t{Y!1#Cd*| z+asfg!ef+8BWLLMuSA+)IifaDSf^Dfc_YR`ytEUB$25GReG z6g^Cj`+;m;6St9}QgY!G>2;FZCb@gFp?WMguR8z;v7?lL#RUl>O1{a!Q5_Nzq6lz^ zBr_8Lot57Tep|^s`_bodW7cEu`5z%$#AGBl)U7&>zPu5@fwcMB9n&Ux$F^Vldc0vh zqK8;~=gQb~mIz9rJ&mk`Ov?CUPrilp#rXqW@vdza^|H)gPexJC!_uF+Ap>ReJAhoj z)8j1y3Z33wk``}GKov9bS5M;B?qNWV*empp^q^xxY#066_&P$~b>{$dp0WAdTZ~>~ zyN_G@o=0)iIDlfY_1eoQmGSbdA*-|x1cyT@=_))+Z;&Y=Q-j^y8;Z7Jx4SQpfFatC@-dO2Rd z20M=ing|u(T;&b)GT)y3JLume9I3aeJLcZ8)#iC~KUd_eE6#v&Rh5P)U*BKq?{b6Y4#iS5;Y?GfM8b?8geIGCI zS*3ft+?~652P&tV(#_uf@^8D-BTjBhcZU8GiT>K9&-1eX@xBpI&38EsLf(GqTJv4! z%G0QeK9ih|gg-nEl}N5xP5xSH?lP)y#_PD-o5JS^$BM^JaZaxL?~(nV@6YH<7vuzf zzpZ?q0Ef3Lj*`X&_r-*W`7#j;JWVlseEY6L?*HddY0%K4DFvCa zuyT05A{=*Q5oKIV1MhAc3UCNlI(GH5JYzLd3BtMD(J%JU+}c)`gE#T6_uy9w-UsQI zavHhLXVl~9gwba)Q-1LuM>Nat&6|g_8hl#e>#q)sIC+P;an8T~XLtJFZzP@(2WXtV zCwg4|PSx#M1?DwAl2(gJKQZNtMm5Itp38Z<74Le}MpAZ%O76ZcHhqo!`v7r5OkC{C zWIWxxiAM!dxeV@)R_HOX(Y+r1hVD8E6UE6GXOR4()Rpe4*d5n(a00N%<+r% zxb4T^2A!6}iL{Ay&L&lQTwZjEPo92%YXM9aoF_o*ObEQFUQy=VS#uTa$g~%gzkl}q z(kxq3PqT|W@67)9qyPK9F#$cafm4Z9w`irEar83QN7?C0a!qs!273giI9@ojx{_z{ zTi=Vwd?hwujgXm?v?%h04!cQi?QY)4Ub=N>%KyG%=KDdiH0z&3OO%d8*?xlZK)%CeW)0(r^QKyNQv-?do6q8GEhL# z{-4G0Ki>x~GZ1RO_1VP_sDpdG&l|YEPsV#zx{lef(AbXr7mmOu#0=f-t*tF;Wsd!( zfyk>-9@q%w7?{YiyPO=lvS{ZgA|3Q7I)DHy9_bLG0fdk=E~jYF7X0s3 z`|tZkc0}?`<5->z=ieD$or+VcF=CMBOg^C!9v)Z#1CKET?vP4rdOWukn}?ml_Q)QY z5+Bm*x)U9eUIt{3uh6JQhtGcOy4tF3o?h9jM{AQt89tke;Smx2IaU0aUFpKQK)}e+ zI|fC?2xz3kLy3$oA%7~TTVkYr1nwmI9bXO#hUZ>*;%t zRp++33e^>lWp9n#k-h78J|JbY9-@~@@ZIYoBj+*?zRtifnmz^ajCmOQB$rvVcI0IG zv4S{UsX?!S%9qyGPN0%qZ)|fO1RfT!HT>V=vGeUNbRI)Pio1xK@1Onuyj38W)R?V& zg+XjLF-`Ci6I|Uync4mRY@9FmmIpVY%AE^~$updOzYyd5ZLC-wzw#~2yg|bB>v2 z&2{Ne+zqdm(~r)xbJ@wp8V?WWlTDDx_`

k;MK9az+An0IV<&-+Q!W$ANAC1R zsGP7+shn_mrTnG5+aJl<^V6@FreXP2t})iEOy7g~i7bYlkI^&cVW`v@Y}{g8W-=%r zola}V`h~Lsd=3(W8t&ick|tN{y3st2fn$;N<)HE9^fq7b(LWC?cM8g+JIy|o^{vd? zq1_AcY}USzcpevRnQZsAwO&`80Y4g&o*c~D!`}sOHN;DZgywK~l?n+eoYfLmnxsHm2 z)|mWmA^{nUfFI(udd@7Q+N)nKvok0sxjw=F-xMrkIxhGbG|xxEm;$}xFK6m z*s`4|(J7V5I? zP^tu6`RAVed1FZDbUZ>kT4P(YJXN_BO6Q-6ostQfRVR4dwcU?{9!J)d<<-rP2;=~+ zV2lF$NJLKTsE}kuJDC(Y{MiXbl_)PiYYKVl^6818(?%0U^X$|}>2RvE9j}6%SCEA~ ze!l&;!$C$mGIohR6Cu{SP~Im-Bo!!=TM(dKz{c9A2%J}5P%g|el=icgGv2Dq={@+z z;ANj1W)r>#zg!$(Is9T=|M8Tg!>ZW;-DTLwT@bK0Dx2hH^o|WDZoc>3V=(fdDS}LH z0bxXsY^EUIbxLK*=BdV4tclN=GNv)YQ~vVWgm>q~S|LCuwMy1_}hAHi5L9(0KEt_?~+M zRG5-?(d1ougg9!_uQCx(3S9>hTLP?EWa^s@wqf|AKl#ipEJ$OJBJz(Oy%UOByamm6 zLC0WHj3YpFFa7?j@%<;H?xO}4r4Dy|I;okDEn69@lI_Wet`uJKIf&xilzn=2a!!`v z`kN<4Ep-zVC+pWEFD9BjHv2vuL9>#gv7M@PwiWNhCkY&f`F9&fbG6Q=hwqm_q1E_( zkzTa~@6)fRN%;eb<@2nHDL*4LUhDX_Ta&4%DKrja-7wkALE<0?J)%Gd@3`8Z&3*zJz`YmAYc-CSF8g4tjom z=lFsTNO46fk7?9{pRZ%)l5~yZK-}id{Z%M>lQ1f+g}4-H0y^*N&m5v$@_(fOKlsGy)V2@KxS`sC-`W%;!?G=DBglh9sBy4lG<+ zmuho0`OSz-m6veo-s>&2m}J~S3x#2Sc}l8WJDt9wTjpM#NG{ z{h?C2l*;WVR6LQyu}afGlI8k_cqu;c&2<3J?NsuoD*L)$AmY1?0E()w$M$Pqk5Z=V z*V~{;%mxKv60$`*WwTl|TMxu)Pva|v!{2x~CZZ={YQ43&TC?_I>jsb{5t z-v)FRpBZE;_a41@l(IQwp!a_i2_+1TG8V>1_XRki)Lf=Rk*&QP+|m1lX4?-q1-G;P zphag1meNYJ(XqQQ>eY$3pWeC&79?uMKY9**kh3BUYT7;dsH@qD%lC2iY+8;%1yXz8 zV?ssIf|i)-rlXz%~Vu7}eiiS=m*62;0u_DCUh)hXz z9PV9oAnE7fu-$_6E(y4IY}sNWTR`GweIif_ygA9kKqTi1`^wG2esbtV>UFnzHN3}t z9g3{)HDUgh1dQ^0JR;&p=m=Zie_K8f(Otv>8&3CSfZLY$&lIr=F;_h@*)6 zKS|l&3H2O0SoHQ{sfJsCg=m`wLI;DlwvLAmGnF7)a1PMmvEZimyu&?B+T++d!&7el z;o3wEze+nGfNo&s#Hc0gq96Ru$~6J{ghus}(E5+CE22$5qzQMal^(DQS^LPy1dB1i z;9LVD?AFNL04ea|IbhAH4&jM__7D`JC(w-a zPcTiwt_v$gXr6}`huM#fVy!7eTFPa=P{79kk^Nc;=fiJKbUweFBIByNqI6;58}nU zJ#TULJFDk8{#b99QgbSu7St`#m+oHct>W=<$w(vlJLLbl+X0Tqz+aicWR1)@P!VNi zW%t;kYZtn)Rj|R)CJuZA@~*ZmMf>dZpij=+bz2_EdYZSpezgRcG5K2p>R_lR`NR)% zbJF^Q1bN9km>yO?FZPvNcn_O#MKA1psaE3%Isz@=xTa4w_&3mK2ho!YyA?`HpA&dC~YnyI`&`ER^QV+tcBDdSKU=>xq4!ch$^jj(2LN=s1{m8@FTLS(t+4sfFSMv zs6N-75f4~*A3w%nLEmuZ5{nM{29^D~8)CybXMpfAUdeVB$x2_8m`L^rXz|>BA^pLt zK{_zQ7z-1V(@|C^AQD?@$bX~d3OC^Wt6<%VT0KnugC` z(s_H!{(JMz-%sAA=^k8vIcJHDH|iyqtEvD zJyL)@;yFQqO;*9iLh^g4h%GBCYm(D4`V$^ZdoT0Z&Q82h=;Pdajx0*2t_L)@vJds5 z_(q>&q}Fc*Uf3y6y4^)C=*emfsRp}liD6RnMS{FLd!(1C-X5i>T zj~9iJvoW7v>SrLXIF~txa-Rij{2b@GjgP~AgFz`+(bDocxEb(xU+6Mcq)EWy+`P+( zc5mz7r#kiydcW4PZH#Xufzo?(BK|EF>u=9TK~FXyllW`I$uz|U`IL}PzsnlH3~qj6 zhS{*Df-8~3WU{ufxgnKlnPgh_Rg2IC-|rhCkn$pBW-Zj|5KxT_7?bP>+jj%lt0IGq__d=Zx!~*`OwQ0%%Op85c%CQyJJ*={)59qC)SCk&YeTsi2i>){l8>AufD%b40 z_m*#eH*M+v(LS?e3%o^oG-g5lT87Myb+=;2v+N=9x-H^vS>`C!5ind6pg_nI*#T0* zGo-qO*x^t5%F*TuqfV$F9`()m0aj|^JutIV=DThf*0^<>Z9#&-T{5cqJM~K4OVg-w zCveSS-L0EWu^b{SoUGR2nC$u;vjTaXr8w%+RP_!I&5`^UB1q@<(E^)79%H0lWsQ5{kAue^`7VilG%d}V<0m-@!3}9B;|Q+PIA+Sk0YZcayo zkcbmE1mNkN?|o<$yF6eF@C?tnIC+w1&_6Jw^4mAdGphNZd0X%P3jjGGr_X~=qoZ}6 zx?*8rl|!`FuW>8-4$TL*af#ehAOYvvVmkW+7i_#}F+AbZGs3K~&I#ihXj5tAY|XXP zL*Y}QE4v)>;hh0jf~ro5@x<~BU>4(GFMH8?W1lK8D0CP((v6RG8rx%RC5Phm4c*JL zp^eHv0GegSm!v1^>Met6>lD*=O&E#-wsu&HQQae%H8kPj{NG%Dab=3T8<$ERII6mk zXUrcUHWel6Q6Qwn`W!@-ml1I89ylyJuiT@YJ&vk|u|^W?NEE-yT@-K5?#VdN3!(Vw z{&d--YHeS1HcFRHr*JjfetM8wr(S>WS=7VfC|mxXJl}{-*V%eoxx+7PLEGlZ98Ba# zo$;2-98)7q%NdY5* z4Djm@5f4tsu(+jKAC$2UZ3`m9s{#!hCXM-%XI;A^^5RhJCv#bR!IbK5_I?9M(an}C zCT(C2TM!m!PQb9QsVd3<&BX)|Le9Uot@p$ml6T6rdsUc^$KrQKRb`)g*t`chy_9`w4qP<<327UQ0_~dQlnF z9GDmx^SkZZ2ynn+I_X&M?&IUozM!$i*2%kk#pJD8(?>J@_~0QaavMeIBz@dLPIcR9 z%dSTr%kY*)v*pkk#XEx%Nlg~5!K#OGF|X#|{nh`ViG2&LyFdy0TZWs6!we$>qx)b| z2^YI@3*F6r%8m~k!{Q!J?ZAxWRFMIHQsoA;qmZgh2h)!UkQQ$pTU6KwP=~3iZF)?ZRbEhJu!@8N*}EA`OYhtd34Z2T>ATD;ds!D{5f*f_`IxA`Z*+ozu^+ zv=v3mH)ya?Nsh?CBM=&X?BF#J3-e~o+JjF!Ag00Uf{0}M8}l}oVx7_6W48FDzO^>p zE@ZMwUi{MnUeI3rrB)f0^joJ{$e1M(n7|aBFW7G}wMfm&q<|2UMQLCP3MwU4cxb%t zG~99a=DJ7S_-L`=9p}>s=jD~4^*y!vRD($o{?4H-xlyeJ3T$sO<#XN|4I7q6r{hFn z8qd+D7~%g;5*DWc_Z>@?;WeOpRvDS8s{s~oEapXj5^m#)K@g!`of!E0|TI- zQZ0R2wI)YM{h`zpL%L)9c7e%PPK=j|dx+tM|)`&*%{7&<<1lum;vMj)uKw>~x@i|=@ zuMvGhnOu#9TZ_~`qEF?sY6uYq`rJ6*!J7YBK~q4J1R@om-6oX}B@{a`P4OiR`pc~s zbw)orWN%mbJN2ZY%Y$^{G)ln(I)Czm42J1=o!4%4UzE!HTiU`tSS-oq!DPr< z{?pL;gN+pk)0~dg^meMFLEF(@^|iTOTq9N(EF(3Bkh0Y&!tTYJ`=Pou+BU<*?4n-V zv8~kph9T3H#?s^+o1oXy39W&G#AD1WB^Wr?(7cI-=EM0o`G=uN#p;YRehKxB5mF_K z7F81h5Nz}0En4}x39hT`Nc%MCKIms^czaaa1_m}Eo4XLX5+TUwuw!TNH$|CsapFrv z1cKc;+nUGs*GGeU7MVjIzg0WfD_5@FH`WulChGNW8zhvnWhGvqlHD}r(q!9-qe3OR z607kkBM<|Zi^epnOuaTiqFV^hs;5L;DJ^cwVJd@CqgaY`?HaQ@vZrE((3=*bq%IiC zGcNE89k)P3Ub$&D^mUbDJcN&xO4>yTON*E^n^*huKQVj3cvp0o)8M~Tpf(-EO_p6Y z1DERM5xEw>4`N(yDhNLX)2&18j&XF&67?F%XO{w+JOC(TBfY4$yf*e;IpQ)qYn!eK z$eP}2MGxVU+O&8Y6O+tt@PRp7I1=%WdsLr({fGF&g-8vpu&^-Uf_z*;XdPqm>3DN-W@f$LWHbBxg+^u`%O(7bUOaXWqtkdV!di)ajk#PsQ)JS( z{<1g)FT&SA57M41cE3o;(2-lq%6Qg&H`!5KD?exuD6fD`rvdj zB+KCqV&eY!i8{2Vd(KdKaLLNc7u>0g8=~6#*m1MygNNutFcltFIOXa`w;wB2BID+I zs9)hq+@^vUuPoG@&@C|L5DxoPkKAYfuP>T$tdD8yg)Z;MY)U~rJNZ&8zV%U~Aj

a%F4O`GB5tznDkrmW>|DL2{rvI? zuYtJ=<({Tx8rU^S=nyw9;$6QX(zK83BJSg>6P@G>d7339st@Bkhbe8>k3Q|0<*r*} zg-dCo$o~*RJ-##RNDP#%1ptsaDw$T5=A%YckV`vf?wV4NZ;ero|rAjv)W`3V*$+hcAhtXThm$d z-9hwV;!<-dmGVC58`6JoS9Mvv_RYa)DPV%>wZAsmkzW*sntlBS>M{GQ<5eJxpAP+x zKIVT<^7`}|`ge_Gw9B>kfL6j59-jR3{o2}@w}3zLTRWe1C4)M10oC@R3Zq-=TX^WM z5$4*F?U>=rL!pb-~UWps1VUjbhplVS2A7!lLBow-0*VVpWHu{o-C7zkue-}*S zCG_t)%da)lmkwd>z&*dDI@fgYFoO&Ssgh2EDmxw4K06P~AwRO0;=HveK#wwS5+#JE zm1OmFvGW~u{Ue=pB+s`t5he2Q@-dfYRI$p5!LK4PpbKyQC-|=WA@1CLSCcj3xOnn- z9`oPt2ir!**x~sX&mzobi2CP?kE}X$Gnwbd7c=YWr89jyH69}qa$*JQ^B(Etp3H0q z2lquIjS|Uk+q|D^;ERtV8c)zdJD$5|B*1vC<9I4Z(W8vGe{)yGuO%RXAu#Q>>Cv;D z%@h9F0g?{gLRJE?O)ZrS{8klYzln;e)NcOrf$8^Kiq$T>vg4?pTMD0o0yXMv^sC-W z@;(^)7&_NuKNZ%jPI0h>k;~WAEcHh2eR+Sa^Bopy%9K~s9`8lUdJ_RhKY#d(Ir)#LwMSFvJ#;g3c^zJ*J0`%P*#&99T>D1obe~wGGT_`xa)^MPOoQ_gez~@ibq5qgw zX%BmC!g+E3>r3nF%pYStyZ+LMZ3|`ho_Fad4V0Rwo-s7^3A$lauRnQanIGx1WE(D? zZh9r2l)tkMo-t$3qIVfU7qy5-lE4@T%cRh(XN?e z)%oFMb17|}`)3}({2Id&C2#Y88XbS1uLGe%a%;WA*=ju+G{mIKJ)5r{xX%+~;c(m( zT_bO^OAtqTtMW}w5xW5C6s(q0Zc~q$)w0<1O*+YEzcySP2#br>+%9>H-1=f z?{=od^V0+boIi6C=06z7xt1j#KB6~1Qj3rk*dyj;LtYOFgrCxMQ8HD0V{mXE6C`V06~*Yf>Xur3h2T|zPZIB1i?-Y{7+RO4(qF2UvJSH;nf zR+(wS{^~6Ax`m^Ry#!12q#a4`hJ!tejg}1FEK%MWbk^wCwz|H)7HY(Ow&tJ2)%20W zS}}Vw`^}>~^99j!#_0dtTR*DOOi&$F7ciY}RR_x_s)cRcJG%k!qrqg*AA$RujCq~G z=-IG5F5i+E*D)^^W}Ef-?Tz(C_er> z%eLFs`qa_K+-+x+$+7b{>C-NY%jY{Kc_7-6TYGF+O{#W={|*`!UJPf9J_LX^W5io? z|D(6dlusz?Ds9GPSF8MEJzG0_@k!>^-XcRKL0}8cy-sh(;(UvH^B?;7)!6>J@>l9{(F(!HUl#*GP9Ba?A z2lB_6>AN?P@eB@?0^R(FL;b9bT=2ao%4O?>52hFoC}A$tS?;~2UO6u8%xw&zNoOx9 z++2&V5_R(eaT`it-jqrrm!mk-h$ix>Ie+=2HiGHxeUW8&k5bld04`>Ps*xV~10?h`wGK+8}tmGUG_t|52a9 z^H;ZOww}zJ$2A@=&JV4T5^NM1U(jyhkhC%F*yui5bgOZNHE*3E?jrK#MvcZtubme@ zC%!K4-hlJEa_e})aJXSr@?Yev^Ho8OMU&_S`nZv~ik=Lr86tUn9+vVlG73n%0tjny zgVj}vm~ZvxFJBJnZ}z01A3u{YR_(oZ@QsT4^u@7|ef+W5)cI(!_{T-!HiO9lKJ5=e%DsG+W6*s9B{cf2l6qYiKSY(=ZwU#0 z6hGK{@1#KVV4}ES3>*D0nJbAyM7IYZjg2*!xIQkC6R05}m!F~Txh5`D4!!f9m`G@O zO3LLZ=+{)nM3%TevaDD)MtZ#(H|cP9Wc}K|6GuHPkW$toFuG|1CX0+JY0OAY>l_jU zCQt1h6-%DOXoj5e>@v1;Yt9JHOFBV`-re04QgL6SbrUrnvFmwV>3 z0!2 zs$vH+nLYgw{ExGSdis8WRt50;H4c}%OfW6;&8Z1(hiwZ#`0KLAwa78|P=@aHboig@ zOdhGN>K1B9=j)U#anuT`60hR`0%s*5{Y%s7j|P8!sWmE{hc~J8mF`}hEUzRtD~V%b z&cQO45!;_eI{>|vxgRZwo>fm_tOw9H4W-qy<3({n7r$aL0_JF-)-Yq9pj+WJiIcK> zSrGLO%GJJ{gvm5R-xEf(dHzEA6-8*SB4L%cU9k=drKF_Z0LrZ&8oyOXt2GQFA|l$N zzKAZ+3}6PorQw_bDqngG2_OR^ZTASdag^NUu@6Z2DK3J^WHPjIO@N(5j|7B4!~KOe zCzVJJQf_R=-k1~H!#|7oI5v)T#vJGF~Il&UyJIPV(4b_oh4oJjuTOr z7EX^5Gao;*3MH!9hiv-o)KUx$q_~}a@-+G>{&Fl^CB{`hoK8tuwG)27Us5g-ih6VV zk!;wG$2rhgq0tv{=?gB;r2Hn^{J|G?@42uCI8jr~;hUSnQ*EgnyUDCcuf97D^iVRX zr1SNHXzRUa_Bcf^hT|?CzEgI-8k+Mo**?CcKAkP6H!b1J_b5z8J}I{PJ?op?R~w*MnR`>?U5n7!H7J_Xgzum1auV=tRWn67j+OAoB-Ebu@-y zTBf)aL1qNw!X8 zv>^3cZ@QT8U6vg#0+|~EAoL!sx!mmK3cQ%#>Oey4S*?hGW*m%*Mtu#Rj0M<3c4R{# z=<;Usv|&r-NS*mIhw%*%-FrWNS<23Sq8g%Sc_r{Z->Rgu%0_(0^|o1)lezN|&>32D znBK!?RWKl%%Ez0jDHn4+fj)(R!Q6Ts$jtqbBG@;eWa^OB2bl> z9hZU8l{-W4;|T-Vd4QB90bWl5NL|?8!C&P;H5xM3o?OBmAi5=iIie&|p7&DD%A+oT z*6Y$HBC+itnXNmypyIgR^i(Z^o;X-Gap4<0@ zGL{b1t$Y|CIBSOw>jVtCwl`*L$Oc{O5D4dL{YD=Q-yopFBLR&ea%P%q4LmKU!HNw_ z<34`WM)l>UOMOSQf>5@m2|3^H$rNT=B#y-Mwh(qqgq>$?>xO_Nv6Bo9C#PNz4(YQX z3`aXT!4YQpL}r#a2GJ|0qv~fOfA~MKFC^w8va^+288hx#m@6`o{qt%x!2)jzCBZaW zbLJruT0M6eX8*YwwFdvy-DAh=C+YkD@)9 zHHi*=Kytt1=}B2DRYJP3yO37T+xyqUC?lyvg~Fl)=A8gWcrQa@B>x?H)>5X&k8rI) z!Ud|V;%R3B2H|L;%Tp;ZLF3Ujzzm5g)ko@_aZKD_LnaY`Mq||0M2_3A(@QvB`ZKS_ zQ8ntU`xEEUIuX1g(Ze{)Br-DvZQ*=JnRE#ytd2VfKw^{H{imgE2eEB5@&^8ey6O&n#`~?_Lj}qU3-B_NKvwXic>mzpwZ8ph@lUs*lh|oa^3kkuht=+I%P*_Kl z+KqB&@3-Q717xpNQ!o}{7A0bjP{Jkzl8=1ULGU*K5b`%s0liN@$eRtQg3fq6Ht_IV zr&6(~w$^I5qD6qZonHi${98%p)EQ^6(brZ-`y2ag$a-#d^LJZ}BTiM#ixr3h%f>oD zXYV!1k;en@Ybgi4mG2+0WBy$r0JX#nEdAQl|Iv?B|AS4z^4LQ?&(`D^m9v`F#617J zucy!b&ip9*iz35Nm$RC4NLrx1(LbSr?}>Yj5z7NFaE4>f?y*k^YI`7ZQX+v5m{bnB zLb`lr^ffc_lkJ4s-7(v|dx5gv0N4B;^z?^p@H-)c zLXMW7_e$yp6J6lz!m@2~tlbE$pBti5@{y^SsQ2}bI5JH`^}EZDXvXg%=y4=jqlsOC zkq+)Ws}_;nhj`1mb)5)5=^11zq0I}BRiVrberirxg=-=dUcvoZodD2IW8HBT4Yrei z{)lptQYRh+*hHR@ul%HV+k2N4 zYw>1ov)oR>?{EKuGhm4bL>88^zXt9!KmMN4NNx{FP2SCU&?@&Vv@q&teS(Q?#gX_) zk7EdE3r8#-_N1RPu0Ewos1qG}Y?g-7dczt~;*s7@Q?z4S=uSV+TKm(8NC2a0oazhy zlcwLdV~gKInPd|+r7lXw)ToY2WI1C6G2-rx$ZN!xE@^(h(2OS|DDfGM-Rat62Si^a zm!Hz^rH= ziF8wQ^WYU4l6?=)pr#?#y$Scsu2g>2^oXb^-lSiks@MF2z?HW2WpH4i=%?g;s0-gj zV!sR0AC^8z2XX#sphF074E17RDK>h>}!fY4~ zb$rhF95kGE0r6vy-Pk}u-m%}f`$FtXCxT?crs20$NtOqQnC*L>6ZoXkpoGp5G$8j3|g-BC=+34ofkRnj7ovpe}yFCZ`;qh;$>&MD~L zlRa;K{=JinD4|3f>qx%P_xQZzYUFz-=TgfUS6cPU`=Mx>P$dt(pl@n#X1iuv!7NRf zec5<@C^6fKH5De!c>$r{Qt?}3c1*^1>kG#A*o2t8j z^(R669($*+(sxlY`-vnrq8>)0FMW6tZOR*f(r}T=LxTK*RWQvw8C}E(t z*Wx7@dGT7df&`iWPck)KWfsAwA*8us(lRk6yrH5XQb@$kpmPTfu)&p3C^$KycnKo9hH?#dlIY!7&YzN#A;{ydSN5DuD%0fj+(C7*C%+yf?;&@Izh9Q3s5iq_R3@N#ds}q`Sx?nu-?mTa z(}F@oNLU+>w3l-DkJX$)m%lW$|Nh`Ni~;XnV_=hTCJn5&g;omDb>GEUD#zp(2i{H9 z{4;~XWUVT2AhhWwpP3~JBqb83UFWN0-Q741fA7Q&yLEluNNe>hQUs1e@VmFo4SZqF zn?HB_WX3|!(gv%rR26N1Aha7kmVFvh7N{v&Yh-e0uF1i?Nm2e zFJ%r6qU7)xN~X+-BD-RiE$Fp0ybnxPmybM=Bj_*uU$RG}*vE8HlKDIS? zo$+2{lc6?#qv9Y(5=^!pa|N(HKq<6;WrtUpsNN!Gyj>WILB?a8_Kgj5C-{mmW+cx$ zT8wB{6}Cj8(5XIUhT9rP z?+Ir1$?M%W%hRoSC32|7M80Dw2Q^&Yh})AM(5NSZOp(MvXwbhWx0jhNFwW%;VD zu}ma|$?g0@hLl*rL}mb?i;*!^DiBP=fNHE}_mF*Ob;3aZ7Txw4kaLrWP;gP-H9j^B z^9a&rlDeXgboXVAY}iR-v(meMxB1W@X{={8%&Nd|=F?L)54uehDehm3PZco9Jd?6`BvxpT=QXY3I)Vh~$^Dm+21yfUfxr9kcw+ygj-VqY zUhQm-51-V{t5ioAk2SA?2ZEt=si)L>jYXHiW05|RKg0LG?%g^~;Qi+DtG3B^r;n3} zKHV+6EFffNPx4E&Lb_l0a`0Ig@puYN+&X9aUp*4qk2F1C+mt->PO7Uz;ty+SC5G7QnV3nRcUOPn^Vzxc%5INlOfYqilvJxi7Q9 z)gY)l#q_(=y>ClHPL0x*qmgf3yADy8RLM1#rBvGPRa{2KxBY9V1?JQ`&Z`mHWdy1_ zUGZT2nZiNjkyXmNQ{?-b!#FJ!s4hDi`P?F3;})6f5C!r@BJz6F2HEuvS7yQ=VKR?% zNyZho^x-7dgU~P5lVuJH5M6jW5B1uUArS0 zzaubwd)|jm*mDLbSw;2k{70>wRXLvziSd;|;r_4C8*oO%Oh}f?=-a-IpXI?H7=(eQ z>G^$}f{Ww~Fxh!+M!UsZ*RU)}1D{^$O*JBACbWEp960`YwTU2{-jplJ(ff}T$9TKc z4GF2}cKo8m(Ws}(FXAb4ecP|%^kEN(_(LMvSyC8C{;ub8VZalVA?vK+uSI)}alNcV z_pyeyhFridO}oQjNzmJh%u4U*9WQICJ3&tphlyAVL&56x77Qv&a#XKnYjPVmAkD;T zV$#e-J=+ex+%NdEPwp-UZguAN@^Fbsi9>U;J6K&@<3o*cXFkaqaNBLcQLX`0v4CA` z5{}pH+^64VqS1%^)J(xEtsi&hjXv1R$Z|5x5x?NJis*2h>rjq-UBB&F@MCP3L2cOC zJ?1AmNHbP z@=2C}`I^h|4>LhZ>e-rvxh`l$l!r+8EVERY_zbC)M>nc!GIq>7MsoaI`fH|M>ko5z z{{e-xxgnf1%9Vh6x$||UoY#oiL8aHLYCK)&BP~xWF*$k4D_u(UAur;kC7pQzI!pnV z4;7PP+(X`Gn5E9Ma<1)T(Z#l|#KHVx*SbX*oHQzavzMN(MkK>My`q7OI$I1hy@-b* zZFiA6$?T51Foupr!=!OP0x`A$g%#hx9A1oq!B_M#JMYGv=7l!9rDJof0r#ik2$*z2 zz4;Sy?>$s1+4p=-$Jx>fQmk2F#u(ZBEZBT=tbDZ*buJ~86jx-+mbA-skC%-pBIw*u z<8Cr5t);6;5qmLX`D|ryHea`kzf<1+;mk9`>|Rcq;MTarGv8iMRmni#6Q`_(ugV(V z={vzyJ%Pfq;ON zh@IZ0BUL)md$3Cjy;r4^kRT7!sAUwnROS0l$EC%N>0NA*Gdb$j;+wtk=i=9RtEIAg4`y+ z34UUnbsngP`>aYhUCtZ}a>x6P7k){Bma%p>XKe~Dh-0qqVwmsw^@y!P>n>{zc(QL# zr$vG8R)LU11j(l0~4(=Ou^{15@*5#rx{E0}P7Z>}p8G!v%s%3y}EiIsoN?^TKW zAvk>tChCG8Y$>|pvmgo%v z9oB91S#v=S^zH1LbFNL9_0p(zyWixrp~g+|tA(L%*M-Uae7&@k1tH4M6Iu7(>>TvD zP?DPo2r4_iZzW)L1?m!NkAnrE6=0z!2Pp1m_NODN%6?Lz<2{9f)DxNDUZ&_qicA>&OmLOK7qeNkIhY0Yj;}<|AgF3l+Va z_hD}VRorv?LRa~8oW6qAM*hQo=Br%IBMnb70xPwnP&sSkv8uM zw@xE=*9aIio!?SQWzRRCQnG`B41aC+h6N6TUO3M4Aiuaq-22{A_F(UcoqrLz;P&%35&dQlljmsL4;TJLYEZ z6({j_d-ur5_dcao(2!BSwQS=ZB?6H)doWgS2j|+Qlb5rE07XJaU%&@Wa|?K>1 z<+#iXn^JR`xLwWQFHt=5{bkeTC;-Q$`pwm_np*qifiBr*wKdY5Z-A7J=e`OCDr$CM zpjZB>AL<$4`kd)i(-U8rESpr+`~m`}H{I*MP!C9SXRL!v_cMte4<-lt8Em$rG$Oc? zA+6L~+l~(C(br%+_w3;jy_<;l4~>7=RS2Y)Ven~tvX_)#U_h@512bv63sndTIx19i zy{5!bpweH;{BQy3-<|V|Q4=2I8{`3ceRf)b%s&9u`cg0PEyr#e=4`T2l--Ud);rb7-5i8U9MK5=- ze$3H6y?=ql6}U+$eF0j~*vHICnn*w5HZWU-JXWqAI_@(L^QH2cYAnA{c(AQNXtNKF z?o~hstey+zz|&CT(;z}_ary5MKyrJ5M7enQkn$QBDIJ&u)z5j0c?k~;h!61hM{?>u zvbxs$4b;z*-hXY@60-GcmrW(-OJ)?5)J?HYvnF8y3UmhW$ z0doLPVDr`XBV>Gw(d7Gfbg-^mq!cDl^C8u7P?s7LgT;i?Oj}|PS}hA4yDg+gJvh6q zn*i-$&;Hp62D?^MnO)oRo-vC&la6H+e@uw|cnIVb?Xu(&2*oG-c=Q@Dde|m_h3k+M zewG>mH*1V?Hl0^fZqf@|KY(lM(b3bB@v3XWFH8Xei<+^#oO%_&4Qzfw*cf+@o3>Px z|K7Mvdmx3p@Xk0jJ>4poSs+E^4N{;2D5Tl?7Tr-97&5gH_o03a1~2X8>iZHfYo{{q zT@p3v(yHDBZ@Lrs%!P|-SR6o5gb;ign6}ovmd?8KOr*1ebOwB;8-zwnt=a`U;Q><&dR;n;#2?A#(w=g1xZdRSiITuKlX@PEf{^Mi2`npUYmFSOBh7N22T5q1 z4M#oBVLi)->yCj?*6Y8t@6fZ1G9&R3CW0Xl5r*ZoZQSQwpxUGbWCPSjcGw9*(|`-K z#OViA^X))`0-u2|jUiCNw{P$0=zwJ)1_S+P z25U!io8d&CKIU+rPmiKh4sL7&UFZx5W<-KHnx{uFLOO{@j#ktSu&RUZoav0m4Wb+C z*#KlfPVXyRzi{f_>zDksNTp3(K>hv%6fM?1T>qe$2WUpwUi@R~1t;oJWld^~%Is^f zLSAMCUyQ3ideJwsWyb9`c3``0^YM#3zs_3Bl&j#cm;RJ?P`601bizm4Kq{;)=xKAY z5;ge&=L<#XbDql9-VtQ`DSY=z+8WWMkKBeXJm`LXc>SFAvCWOSjZVBxN5?`~32}mx zukG6qp%8dB7Rc<25QnrOEFBmQu!ar?bg}?!%ZT6F^qt@(9(H!2NA}+aon2bQee$m> z&!mRGtw8{s{yp#Gp~8B+PS1Mp8! zL*6wC7rF@q>_a*>HN=ww%FIJ>!fZf~(>Ze5cz8(UIEn3>=I1_KW(6hB1qolR()ZcW zjc&P_7k)^Yig5rStjr(~0LL3}PDbj?W5||xDr66KdV&?wslBDQLFsND3>)X*eWVQF zkCoBaIZnOu^73PWV|OQb6}+Rx%Ie>uNm|Znpb)hYOvDG1h90w>8DZuCC4V-?3ZSd? z;V?J17#P_iU&d#HF_c@g%TlL6j;nrw)9{_e4>7}2tRMj_F6OUDQG_&z-5N3ntpd)t ze56Qkn$B>5C`v5LfD)(lc5#EU`80a{D>MEHrwt`;I}y|Hltk0m47ZlfyHTLD)q5F~ z(YTk)3A6cRM4-1Yc_C>14b6F%!P9ZIHUUxN{AY@<7cW}QO;>25QPt!arc)TNw6>sP zpWiHimTqe$xYjR))Qr9FS9NK*{oWBumi44I9@$sRZOMiUdWfZ>*Q8z1y5OeaUH8YA^bK7#s;U26_(P4v!y@>kGexR7mj zjX^YTEb#%xBK9_^inT0F<3pljL0jK}$1@KGW5OFAU^%V4W1h*(E1OCUfvH=ouSxXQ zE#=SU&0GCC4d)Yq4{b$S>{Jw9O&icE=sEIIY72b@^t!TU`l)aj*& zsD|#8H*Y*Q+frcE#ZJ`qI2Yy5^Gi8|Z;nqy0Pibb;F9>r!oA^xpVedpoPs8WiGKW+ zs$vMKaOw#-ot$-z;MecNQT{2{HtB%66(lJv0de?rsdfp(BgL9 zw8DI#4JDnGG9>F!Ylg4!s{?`WGP+;YQ30Y)b9an1$3=Jo3Z$3DuBW)Y#;%}hf+R~F zc3YY`2>RAiJFQ@FfWsjhpeK@Mu?mqRMB30m$J7y_Fq+XdLgO&rpz6>JIO-kh#!&%< zWCws)JCvM%ou)0~=()Ga^ri@k;R}~qS0r`lTDG!?3}aN}_mMrPUW;Hj z>dHL>^#N{YTnK51H6O_R%o;rNL5%F#Png{po8h*O=8=2MbLb@jl*{O6vWYa*}s)v6r}sWY|}6knAObG;h&QtD=1 zDEl-}o3Gs;Ns&UYt?0D;M9iRe{9;Q|YZTC_UpG#uGMEu0L>KcL*DT-H0w?@LO+{mx zHp6JI&&m-d$8=cYxqe;d@hX#abau;a;a7tF~R)&tyKt0-Q@;fBTLO{5pUE%&b| z>8&AzB6aRltK{Y)@~lWgs;o+)&}p6vTr$og=}p_Ly!J}LZ?wUH+SI|q)YCF-w@c1g zC%l6ToXNPVB2|!&iQY!lb<_@~5l2?`riZw$>wimc1Cx()wo}ap{AAq6JAB!%#f^}M zKABwJZcvwhZB5X~4r|JFpE#2$TT=62f7q7%0M)(w`AZrx$Nff~xpy)S6@|3t8ti4V z=bBfk9rVVre)oMf3ONKn%W?1fmnZ6+9$`UPlDc!UZU&W=*TK$GDmDhpV7R3};5R7~ zw*)Y5b;PxOAXV5cARRF6Ly#E47#kY4`0FBqhp}(h1U7;CkdsDHo$FY?UEWt1lG%{2NPFrw(UPy z_K#$m9MC7{deU17CR+E!?C&0cu&-ShTH&xzQ#I+Lq>4ytT`el^+Id8HiR}TYfZ9Zf zI!z3yAa_)2AvE2mn;UkJmWZU0@H$LM9Vq^Ym-t5}!NFO39LMIRFB z_Sl>97VH*h0rj%Xi&AxpPh9U2pf5%(Kxp;KeS&>iP9yAL4Q{zAEHZoe$nJQt2hjHs z?v89S%|UwPww#rz=i%q~uvxsT44RPp)Ukzc0!vLPcEPQj=Rb+Y`8D1MmWQ4q*Bip> zeZ$>noPvEthWL{7d!avT}&qp(z0pBFg!yL4-s3*a$h;ip=zp zHLJofj&9+X*szqG+1na%dvE18GK!+}V5IN3V*VcLwUqfs) zjddB362!w!)=4aMB*oRgI4!nreEBe&ZK>p-l5hPGN(3%a;foD|+D+(-Wn~^;QS`Cc zY;}}ptSLzM^r^UN3x`1yJ7gt&Tcyh z#50Ze53X~aM0Rya*Y#s#VOb{Aqz<*68RCJXMi^(JiB0*C$Yq&SR8yJXlFawgFe`Kx zqE({1b`&obQ4{1*9$LD!ca!n$ggSya6v^5+FV59v+6kn(mz3jCHLaNnZH9AnU?%yU z$QAq`?n*#FfqrOl&JMsFJ+PwQrRis?2jEP#16@_-TliXsYrT0{MmHuH8WV4~vI$gR z6?I%LfsMr4+g2#C!>*Ts0h=#-T5De|LFtb(enuG$X7e8M{$}5YruX3rW^@mrN23VR zSFDdDJy6g%f)8z|>nYuCDmZo;tw5-dgPI_d+{Rh+vaGWo4Sl%#cS6SqHwXQdpUPV1 zqsFe;@5b|&wgo{~rcu?s_v%k5J)-spjn&6>IFNBdVT;Kj@x1e(mpL?n@7l?a^jjlO zBddpFOooeTQ4H-a!fp<%?(YioPF8r#kU_WlE*8gO_{Ls)MGajYp`YjPQ2ylGZ4S%T zYtr|8P_vx!)dK#_=Kf95aGfhB*&cW}hZf1M4Q9p#ATFrFdV$FhHWS8@S~tUr3MiqVhJl2Ox}s`q90pD23g|6C>?)cnS}mJXY*7Vjy4jS^ z3_uCH)-524TXcMbnZK2p10|R_4VNS*FKz==Q(8q1Gv~Ue8Pe7oJlP;dPW!MvB)0AqMuzuE?}p zG-jySO!XiQ60D|nJ?))fXikCI@5@Ih$DduQLi%6xW=WPX_dc88aD7M`=ilOh9a6o{B_QeY?CI09 zp#vIGd~W>bT zywc4pr*GDU!p3M>MkTo&F#wahC7R_t1M9O1f3)rJ*R|~P!LDm&3a+DP<3zOrPUwRf z(-N)Uzn=GaqT=CH@MVN~1(#;BQ{%YPPqk=x|AEZROBv#Es$bE=B~!>!GXMRRh>htN zeU%jji}Z@Q*-Fm4(%PNOJ3ItCvt08FyH- zGo0+tQl;qokfg3By%2OO?^SZ})VA{zvUF-wPVHHShgc#P1+nGjBj5%uk*7F3J$!+j ztBVh?5M)LiQk;^0>soh}Ti8+6X;Se%qFfzA^sxMUE>5Ite2- z_XY>Yi6xu!TUA~fr90ncji3C=bV!KOCVNU(b(5YtHLTK= zzVnC{7vWxb9V6FyyQfv~+cVwHIq};yF04kS?v;bD6Zbo;#cF~ZsR?@PD%X85pQC|d zP&=HEJUnh*f+TD7K@6{5eiF&N4>h!+*ci^-nL)9VuTp01t+@rb;mnric{u7Dor2s%Y`GR5`}>K003~3>vN-Mmcx!&_6E=oj zhqGhPLs_zugDB2v?H?LVJr(Q$eMHY*%0zb6;hT+Ez1IbX=Do!#Qht5eaJr-8?8a-; zT$A+OFS>85eqnqhn>G|C9}b5CT<6z^{PUS@Rc+~l2VxY+miIhq%C{>MQi#IIa=wjA zabsXJ92cwb+&STAJLslwh|q7b^Sj%1q0p*8_47q3=EuKnGy%G^wkpxnjN||DgrqHa zh(68h%|fhURbsQsqWoA)jD$z?_ZJC?MCp~mny6~`iQ|VXbmLecZKmYh!=`#vZTr<7$FDoAw8!A!42^53%Kx0{<>|P5^bSU4A&sGoHhF1Y z^|zNq%K?fuZ#MM%^nbqt(o5VwbLZ`|^!fZ-%bH)Sh1JYboO<{mA)nKmGQYj%RN8% zzc2L=xxFB;5ZAN@hM)UWRpcyM#URD!(EhG%hYXIaGEk=4xN72aC*!5J(Ia6R(of zgYR6QM})4ASQteX7hL*4-@h~nH(4cQ^bd9*Re$9?KTU>6DlTLy`)d41gal@(QY>;j zPyaAz@K?wwxIZITm;~qkrb4nzV74xCTT{w&=Ay5oM&oj&%Z_aTCltURhv5p_u?z77 z2glO^70!iqC^UCSn(ZH(BxNrG75w8^NrT_K`HwdPDP-_QjTv~40Z_?xynZ-*dZs~e z>R3C%OB4C@fRLP=H_hP=pzTKt#wEsAZEe4nc5!_>oD)`4ib@-~+utn2d!NDduSqG1 zMzluzWRqsm^{BrlHTuv0v%2sT;LMObe~j~=u<+k!Bh#xZXZR6av;~uIm%57nyPx2_ zY^VACqu#Uqti%2U*MDEjzpq029*Y7wCGPT4%e>ZamiFhy{F!KyQC>0x)mPfqLZ!pM z*89J1?B9=pEYlkQm9F{a@9VaWkmqC`buU5(d8&{OFdI{Yz~67=kK3Gm1YD|Q^o5@R z6#NdDsbtscx3wrf@mP$k=Wht{zb_Cn1B&W<1o14g|NhyJn+x%vbkHZ5_1AjDeSer@ zXT8ZNQuEJakR3O?C}6HC_&?u;_L;L@tCHt$O=@PN3`O>En3(YDK|?>D42r)k=5*pA zL0s5}kLv&9^TAd+zP`&4l`>)c>c-VrZkf3G$9mn3)}bG>f37F#Wz*7tt^Q?QVl(`I zTm`~HH&{AyMZrhjY`EM?sBl6yHPO-0zI~Nlr|WA_JTU1zI6HboAUKc>?QwEQFlVaZ zAKaCevIRgVS-}i`hyF3OzwiF%$_rFe7HMj1B9TUo@|MQ5Nf2fdbl`>0q2rRqK-HD~b=?wpKsWygq=I4k;nCQ7YO9qucO~cG< zwd7a&X>D_h=SM%yA@XUEQsFh8dHo-k5h$T#;=g#N$#*Tpk{Pp*SszYQ-uds z{V2g;`bk8Xfpimq89D==j(`V%+@Nzcn6k}~6PA5b(4%*ArXbDuwW)^<7PhJ?Fyg^b;LGn3b#UCF!IE z=L&EZGH&4!5qM_{I`m$AT*v?QB!-lcJo2_wYo%=j4zZ9-$8?j2F$)VF330EMk3tKN z_sMvtSE=z^X`1a7nM6d>vy0v!TcekULYp4cSXqzw)plPTdF{FIK!w&?P!{BUT!RJ% z6T)=QYRDHph4_$6)Lt|?rgdskQaF-z&z`=g`{&KjI>KL?{&hA=Wxe_MZ=U4em7_I= zD>wOejLV4GO|33eBF#PY%_Ivg7RuA9b|sE?c*rqyAbYzoC)y(&XrdTP?( z=`(rR^Vb#ifXU)BX*2lY>qmNB&mJ>bJS@Ju_de+n>t5?%uEhUgA*W$rDu`72p@VT5 zdH85s~PDPhi-Mso3#h&J4s z#YsiQ38gE#X9RJfMBW9u|MBkOMl44O-&-p0uaG-WSC;<8{mocp5-!TR9@Uo52(|An z%B%EWFshnvb`ET1)Y2^yXXKVi+HKeKq}qVlUCi^Ql3uH(#)X~lR$ZxmI!=NuE7?au z3f^$bl}8ZA`arJdy3+y5!kLBo{Z&JPk_tXR)Q!b>Ji>D!-%xA-@kf z&WW%Zb2Xbc{k|3Wwt4*SA6NeUOM#>iWJ9a#H=9b=_WNe;;>T7dym;$u2BE!Iyro;s zXy+%+sMZW$ElPpkTFFLq^;dRlsSX8kkuv5ZngU)k)^UYuS4EU`jHXCK?Zr#;>{D^E#>Emr$At~cEd8X+w~KqG6OJJWXF$PHD>I1V zGFrrs2V8#k1!T3hdt>flPgf)^-Tz9Cuy`cz))x+VAiu2<`%m+EDSQ?z$St1tGIyM0|GxnyV1Df)rNp*mbh68co~+26uS0I# z=7o*Qg9NLLUmXpyDR6YaBwX+0Oa&cno3M6dhffJQ!W_1$g{tMvQ!i%I6F~Ue#Roox z9c8F?^@^p#1aI#&We1OB++Suob};L8>&)96(D?I;&UL(ExI=s6xnGHwWb-8jU(~gt zsG~udYT5~L9l%@t@kW1sNu=|}!cvItd}_WIy=HGZkL{iutG`v_YKi!0ZlYtpYO+ab z_JrwI(3qP$zTNfopiz^8P*XDUOba$yKuN$Y#xqvBcWNDy|mj+c0_rEvalA$8=pn5zlYxH==r(wA#of>ydwlt309jVoab>mGVWqXvdZQ+F~ zoqM+)a%{0ZkiezOA1H^%IBi%^9=l-FB6Z6rplEjsRU;V z=RBSfkel{L43-UKy}Ew)B5(z>rtqj~zpMHB2XCC;DKjfU2@>V2%@W;=Gxv>7cXJrY z{qNHzh~;TmxkzK$;J4AwLATLQ*BdAf z%CZo{2?^=;-5ws|!wQ`xRh!L7`JnF=OIPt3n9?!o)j_>Pg>7%=W!C_Oq(Ituox;G81Xk;zxF^RTQAYcnG^(oJ&UQIL;txu1STnMZRgZE4TKaG_inV!&>tEl zBT5&uL3aD}4}sEM;F#H1Y;ujkdHh-@Hr{qE>J^^|oeWgV)>a&9HNkbM<&f(@LlD$QckbPrMER{~&j zcE!#lCnpE!tp_2>q-3Hqt7OYdvs&fd*Jgsf5v{}0gp_f_*S;D*H#gI2jh%^4-d24I z1=>R{u{Wu{!OY35pf@ugKm_Qjp)-1X_rA*PY=w^4jeXrYKUbI8oed{QOzouzsHE$Y zukhxG(>^Q+@logxIp9YS7rh(9!k;Gs_i9hRcVKwD0?Q1;{F&N8j?$sWlaHsQ+#?3f zL_+=&yu`;&mPro=t$=s44#Qw{^e5LKmrKK3@teLz|BKb^5FCRCU${wus8rrKK###O_@meM^lJzh{=3ix4_yea zl8hfSxno1IAI7;aGEwX5S$Sn#PQYvxhJ5Gu?R)(uGL6_^n1SP1KTO9|{ zmpwLPYB6#As?h9Iy0F z1`wD*eeE75h+s8=gUTjTWl*XdAw`AJ9a1?WXgf_SzmS10Z7XY z=O{HeVuc0<%zJD?kK@%}R{R8Idwxe!G7+x^DUj&zsegT!PM@PVcF=Tu1iV1ETdSIX z9G(IXDIN6o)ISFAz;itey0?u_`Jjc4UPN=sP_$?LeD>rT*cXrD0R6_Izu z9Z7~#ZQ~ssIv&>)cbz{7W6lK~<27R!^i{ z-~W%r1NV7PWPPCS_Clw@j1O{HzCjU&a_eUkmUF?N9RbMnz_g7|M1Gc^=uN(#l24q4 zQ)CmVUB{YLA5SY7fvlCknOUpzw|Ic|63F3IzvuVa{TYvTnBPQyx;>wS{ggcMQRrmH zF-PWF^4%7pY=2`jxkr1TWBVBGd!fPVgA5zHL;z4%9UKIU6q`yU>;xkdpFEy9P2|I6 z$qZ?Q<_s4%I&5bUT!nz_O(_9u&xhwcZ%); zp!M)FSkVU5fg@;@9q0~qo2t~}YhI!@`;JfR^*79*{?{484{_XlA9M&IFa4vGbcigN zq50sz_rRST|MzOD(7S4l3FMT-H25@Z6{Y6k3n|b^C4g!ZPAR>8urM+$QG*ih93@7a zGdS3&V($2m#F%hRU*%Nz$$Q4Xy{dh|wx@x_dN0%8zW2kZ$<1$ib@-opW%Jrd-Ea2U z2<2VXT9AI*|kc zmRNF%9_|xsaWx zkAJ}rGc@n?&yiB4!lhMi?3ufPB=W{f@pY`ChE~Mrh0SP>*8O@OdC!y8{4WZAOF7#N z03J=C>~Z_ehJP3rtu*LBX_gE|COf1It<4AZ*Ek{|&=YH_hv0{f)Gr`4b&J77<}>`Q z-GjVj-#PkD{_|SlEGz)QC-*oJ1#aQrn+N}bXi+)zyf`@%NO)E#+;CT_VU=0~l{6TS zVx{FI6%X3kN2c8Qe0SqB=TWb*k2-TU(|-H?_+(NCTF#=z&+k!Ywk@A}93~-EaKLjv zqTs`?|7myoz78oT%GU8X-CvVNN{6;*SaW=i}7==W9 z5l~o1Y#%Rb??;VlE6CZv$_LT|drpl$@O+=UDZh@dhX@39T=s ztt<{Jm?Hwp4-QVpF{m=6LGQIn!qZ>TRb(5vv z48Bq&q-eqJ-%z{`oLD`bHztK`bSAb^b+J2LVoh=TFFA6A3ib!@ub=9KX5{x4ar>zd zAljj$q4Ahb8%XAk6;C)iI(CAQ5+@ZD%tfyk%@4`Eyw^{XSOCSXp&)>rI z(5hKY2fxX-?3y<>(sq!ISn7xjUpvf}%HkRY2?#a5xr9v5Ur@;24Uy3vNX>q8UmQ#& zt&HX2p(|`V0D3y}pj$8Y{d=`OedaGFfD13gE$1E&=8Ineofg{r-$9=|V3yq+Ym{-E z#>5$1G?S>GB`>^I5J|U%*dDfUpbU0!$hY(vvj=*Dhi}__<-SHnP7AtL2KhHP*pR6D zdM7P+LmiiqNMGC-H!CYU0Kkd{wL7$0GGks_43|!v5J|05Bnwhbnr#*TsBn~$7ESZ6 zNsaqYuXdUWOdnp->G%lCUvv*2mZZ8>q$Z$5rCKO6+9P=j(5IeY2yT%A&{Y>J1 z0l_Fy!fMsE3e=y4ID5DDeB@s~RSn!zusS9D@$<8_6s?60`P-Q?GQ`~)F;ZCz(ncx; zjn+MWOe(0bs))+|tp)I#vlcLHBv+L3BzHe|H@98F%GpIzV4u?E{bFuQ4KbZgg)HMJ zC~xZB1S)?>L3Ircg$WEJZ=pxh{JdR293Z{M8~$)qgp{<|fG`fmTvb+$)qqBru5K>- zq=zWzu$6jJ6GTt`MgSsuaPwZ0li<&TCg`veNJAUE?qAYVsR^0*NfrY@89n*}*JxCP za*%#Uf3qYE06fbK@88ewH`x*u*>(-O*dB+qfl;gWfMqXS69B4Tx+!>|9i;>EhvpNb zHP9s8%!^YgOMdBSpaRP!?Ib|zmD_BC2JR#PQPj^sc*wb%llM5_yk*PlmGjHFr#ZDX zfDv8ifoRP@BoKIe9%(Yu=U_KbBz}ZrO%x+Ssm7ojz~39r`TJvy!S{={n1Fg&_#G| z?0HgqQ2ylPUq54r*~3R=zZTBAoGtwk-2+7a1l_1eDzm@4WPuQ?c-Cs=UG(> z#{rSM&Mei?NX2|b?~8aqRzva(=#x#{W-B$+m zbj>?U9&Wbl``SvD$8HZ5zyrkr#uN7{aZM@hg9IhHW{B}VeABx3@SpCq!?=y9{=@ro zwg}xX4adlD39xWxI%Rl*6p%0U#VyEeon{G0C#CmN%0yaOds%l$Y%(~myp_@w{ z$lJ{WI`l=1|I`=EUs>=~(C4~9zlxFvNQHiunGL5gJy&V@8))Fg?lW2HiV~yhn(v|O z`+$j7;mccxD@fvXQYpLY=W{P0W3Q6LQEy&q^Z{`N%|@gV_2e-K9M^dzSI>2PVC$O15^8yaYu ztv$6l7FrGXXbwEkt%=~FclM&@*;>E#Xm0hY@|6@LPdG`(EBg3lwQFNgdOQ&OGM27a z_FB~KySfI}G=@Zl;$C@mpSL}|uH?q+x4D&Az~v+L`wCMbs2|tpm!iPY?*j-AL_aC= z3p7#g%TM=t6@6Y@T~2u(3=q)`Ku+#F=9qSz)8_u`GuF*FK>wfJgH2q`xb>8yBp~26 z1uoau)T(MuqG(}y%H@UVZ3VoHf=e*a;`?NU@{sKIzp0YbYGW?F*4kn)vO1lolQcJO zJ<7Be4G6^^FES~V@nD{LJfE`vXTWH8KYvJofZeW6g>%qtkd`fOOUqfS%?i%W_(bj92d>kaxPo zsh4A8*cY>6e)#ZVJ0Ro1XE5ZwpAux%3GP?z8n!8nnm7gb#h(kXxQ3cgAK2hDmcB;- zGt4fi5*{GMk*~qrza{$BCe%GZz)Vj9oM;&`sN<6w)~D7OT!QyrY2EbUrrV)bNz|J) zaiIlS2Y|?*2VQG_&f`WSfNk9)zuG4+vgwPSyw*&=(cn^0+?qR1iQBwo>wAc9T?K#0 zU&`rMY5(^V_Gdf*X{YiAY#URWxP6zwzVA<%{4n<`y_WQ){H@D@x@WNyL>ZASh+E5c z#)X-K;cjb|*((GFQ7{CxTpBuDl66hnTlX4a$&_@e0gZE#9C9*HCZ>A{ct49U1C89~|DEU{HDoAYGmTGCQ5u-9SK}{8-i3g$9UG z&>k_!f=xt{s=(0q2Yev%vjQIexoHfl2vC}J{56nI2|jC0S1k8m>xz{PjH(G_QGVAu zZ0JD~@iTE2sGv-g?ADSy>XFwSZg7RZpMBWZaXfV&nZSUrm#r-?d+$ zg8H#c!X6MR`=Td977(egbCc6|R>8Ps5t%LqSdx`~^FXbYbaI?hu(jy~jA@c8w*&oS z(%d~Dpml9c1spvKfr8nFr7Z$lZwW6tp) z1EKh4Aecb4#QFp<>nXQk1^Rtw0r_&X>Lqp)kVf5&u+Td;1rNTXt9rU>fY0KTc?nR5 zZ&cpBSB<^H@9$5G#Wr9=x?VIuxMW`mwYS%l6dDzAn7?+oY}Vu+R#KJN)kU$mf02Mp z)G2E&?1jpc!6B?*olavZ*Zy|Of1057>8#`tZ}S?^D%U|mpDSwDTbqFn0uuyi9NK}f z1&PXboxH!x&B7-z*V^sy4 z&A5*u1!=Y#m`pFEys$D6Wb`D#5YG?nrZ;jPaHb4+p9~%0>S34Kx$ zvqO_kz-a)qqVmCRD5>!oCoxP*83EbJau#9y6~q1kl##y73a9jz@C%UG1`5I*ARZ69 zzXZ4z?LcC=3kLesdgNYZ0*Qm2J+|qe!0=xv+>dt15N*+i>kf;Z;qyY=iT$Jh#m=d06`{DL+ zN_4xEmmdIro9-4tZdUWfyJ({6$RR zghl9-PU4vuu-~qGzptgwOcL~yW{j*gejV%!=q z28C@UuwTzR_n$O-ZTqMh9GDxxrWHO-a*tc{ZUZ8IrZ$iO(cJ@4`S%YkNsu0(|yi-?{BTZV(2*jC<)qlG&j+x^-(Gv+SXN?x2*&<+o z@S772_4c3$SE9IugTj5MHiTvrpS}i#;`j3OlkPx(N`A9TisT8wtlJDF-&r954_(SK zS`am=3lemmDJ)34i&O7!F%5Q}tQJxYw3#*1Y@_8txpSV?%Rc^VH?uI2Z!YR55nJ{u zL82qY!vV8jkw@33WjZSC?9#EIhhGjVROj!f?iw(jr+f|pT4pa`b{Xj@0BqMX( zq|8!8fw|cV(}K4KC`&n^-wZmw10+8Tw#G>zhY)suJ;fym9IBrpbVszw+jXSq?*ktF z;+U}R3xP(Ex?o3&O?!gOX3ekd61N5n@+m%o(d}xQ3o0Pbbol0d3s6M6UI!CUHXFdF zgq6ua+kjJYN+ih%;y(=3pnrniSpsAlXwKYs=YF~I(=u?-ZhLr8QpQS@lmpUsrS0n6 zm6-cC(tsagtq5IB5k+Kq?_d^PSWI+)(r>(&DKhZ$*f1-eYaZx6C>}*KtpZ&wO>YTU z`fX*8KGtc4OV*lR6Y8P6Ap1`QBSqRtgv2XBP2DjuF*s-Bc#FZrsve-vMFa4_Y#cxg zDDM^nfM0s84S3Zo2TC-|AC9{YsbZ@S%gD*W1rrIrE5oI!dxm9ekoA6Q7-rJ!B5~hK z>&lhSLl?V?3yjdc!F$iCTC=ZSf;jp=k87V@Sv~OX03K`&;nrcn6OmX=9-&nF0=KVY!UfR zLrJ9DV)3AV#z5~YXyyf!H2AJZ_#W}Z%kNFWTXVzMmOSt$5&U8OvK@d0`$agfg}N1X zq*+cqbaQ281!zv+CSRMj%qbj%A#tS^b=vpuHxHKE*i3GYaT-6d&dFJ-@kWoHGgX8c z*F{oCg@tY20HD(2F@A1t)VF)8igF&)uKnw%r@L(W7v6Z=-N`KX0k8vSsYTE0uzZY7 zjZ4B{nbm})XF)T> zGc(T2``&xk`qpx>SdKdUpZJ|~_TFdj!|J7Hek*(?ZrROVM1_A2zZ@!n5@7za{|i>( zqXdbdb@dc`5#vu83rT4--}6O}4}ER1N`6yk@U~_6cMujj)llYGi2Kmj8O3I1jX-eq zjac;*rx!6X3Y1!kkE<{-y+g^nO4hOhJd8HEAI-O9Ku|@dx@rjg$RbwePar8H5kw(9 zpRqPwIMuqNPY=R(dDWy+5SbnmNj22=`n{_8mj5xKyQ69dz0JaO5z4`0-B#Z|)?vdw zu8+NnF8t%eYvKl3;O!%$iZ~G zU}r3p&Q>tw`KCEc=dW@3^Tn$DnNYJ&`4Js`H^$~)c@->34m5BA1QK!J(Od4Uu$KGg zl|jm*Qmg<0$_3bW5FAhX|D(>*DZ5DL5#$a@mlAl`8ao^KLHukDni318Y+`!x8_t?j zvb-P7z9XT_tubusYM*n#6yILbS=uDE_sUo+cx-V4;?j6E&F^@B13|zjlCI8Yo`RVN zTW)UdYo47HXl+~QuFtiW{?Y6I{bfA>C3oM)Uj-x-|5UO+zSDLLKn#)@z=pVc`^dog zi7zyN-z`oS7Az2dk7GFEHenb`zPAKNF!P|HQ#kHs!trX(E%`#@Dpqc8bxrYKtVyDF z#<}Bfp%-=Rl-i@pLSXkr*vNmhhu0SZpKMO(GBfklPb|%_5o{B91UP|9#JMjUBUXhi z0JgMSic{_a2>SgC*8aaZNcd6xWdL43l27&e3t#=G9Lw^c((9iLnMR5o6_hV>b=Ud; z+L7wB8-%qFcS=)`<_LuAyQ}_2qx9!mFI3cmwaU37qKD&rsUN1x=3fbBEeo4|eVAbM zQ;+N z&*%T@*MY6;xzextTt;Zz?{_}qcirr{ z^jB@caYfaLv+b0*TViYEF%^Ja>6r~5_;c5mbHR4a+36jq^%ntddx5Yd@O))M;sT~| za(ukmNcP}DEQ;$eEN&-R`M_(^Q*5d%HvNP=TPH_t*TlREd_BPW>aNow zDn0H*rXv14&0dhP1VpAG3{-{icVA?|8_dgAdm~1TYkaG$hl=X5r|r7Z>l~T}p_Lu! zFL$we9vbQtQFyyp&i7>JRyQZ{cGOkPvpF162C@Os-zWRZ-YKa21@ zu=J@8v02xYi9R1fg;7}QzHx|-ML?qQ^lnN|o*wrv7-5ih8BR>IWtY1qKfqy^|INY) zVseATZs1pZ6D!C)qlDH8qIZOkwxv`wH9LN27vERh5?nBRU=#cL@sadzqq0AqFWGg3 zb(jDR@PAVxBxFFVpO$XXy}U0|6MfjBYHW>kYmVTE=y@%p!09~%{hVA>m-R(Gg$Lb8 z`gmR5vhQv5JL9Smj5a``bjVr1pzn-??eDuEKx2^uCh=tkE~7RjE%0V1Tcx@3borN8 z9Pf_W_2l2;{iwclGWD2v?qEJm^s3{Y$F3_h{Bua4z3YmkTX~=CE}%Oa>{x{B^Ovi< z_C%Trs}COu?p1;sb>uO$*gwQ~zL9GQ>5~d^7-?TmMl8ij=cKeGh!zZY9%B=Ccr{l- z4;A;xZgRqU>Bt3Ipf-3jlJg66HN_O+h@ac=v~x9f8+Xz40o<Y6B?5#=yK z<)rtfnj(=M(Dpoc#cOVDb=;jV*#MKn7Jc*7heQca<$YJ~`B~sWoAWW-X7b_&u5gZ5 zVXxd4vj^Aj$(D%#Q>B41$WA=+Pa*NY-tTmrh2}lrEnogzz}cm>EzMBS=sgrdDZDxi zRrzJPbD#VD`-yByZX!x}bOUi0t;~s6qS8}&ot>-S%&b=GoF{rUQd`}1E>UcUQZNV5{eMpK3>N!k zzs!uSYm&Hso)iE4Cmf9D{;&N@LSNg%XAtCxO{%!d11KYzsAd`& zg#o9X(nsyt?t|oJ?TIEzJCx2WRZ6<2OFPJ9g#bB1L+^5DfG@D(>1Xns_=S!*G3b1- zGH{i+S75of^SSEM-D*Sda-A77zH7y_4{pBvmiv1vqvkzLR`y29(`hiY{4^vCj09_| zvVtqLcYSu^jsX}FNqFQMSmrbzGNGm11SMl~SxZCXyW5kB*T;j=hvGkE!5M^lyfBqc z2tB~Hud8o!R|p*eb+4sdDXuk>K3vE`MxkCY*~d#G4Bw5@Syt6`w&#C=6E45rLT4SI z8C&mdp_jU-{`gfF%w0&wT0LJ*yx>*Ha`Ji1vF&nZugu#l*NA;!kgjq0O1-9M=u`50 zy=ms&HEW>O#{9zqGyEdp4gia#|aJ{?k!v9&tj?kAWjPv203M zuLjF;{(G`HTCaGkk-CDylNYX&3^s*XTU)PZ+_*l8IDb2ZBFjlgE8@H+GCHDq6@A=O zhH&|8Q@urL=8e`r=@=L2-RE>s0MX8;#l@8q9=Px&81sN?4ekR+6 zz45s==vg(8-?kf#gVWUV$`87q*?`S`7`#fmOjpXslb z2%Y{`s$c{v1DsyBW!w$+rT+}8~EIzb*NqXE7 z>Ma5fPTFYkZ;P=%K4G8&C?ujPum5`o`Hw3Cdk9_Ew_Ll$eyHn!p5Y-+NiPf=2@*^- z&cAvBHRBNGZE|+id4|TT;+NDJ-zO0Xd^rtTZN$m@9UiDFWXT{z4@eewev^E2R@I@ z2g5}s^3~tFIPnmPC-7QByiV`O_u5=)m3a{e8NlU|7RPj5nuS>G)d*I^)v)YEM5&t_ z8prZ3us8{e{rSlhr(g@U?tTc2b;^lrBc{3XtMDGzW+w^bf47gYU<>6vQ#ADTzke60 zJS30MR->m~?^R#vEOlKmU?=p0!w>Jhobs)vhK3~n1L&gKy*E~6?J~~MNnM@KEeVwh z8TU0x`5bXL=rM()^$nLgm^$RGw+LfOB}f#VK{533sew+quybeU`JwUBCv-s3i#tpL2cc#v~_?={2S)GU?}faWekl(T@|z}xHf?GVhc=W?U; zyax3e!)H6!!;MIwaTP%Qq-TNe%z6kVUvI%9$0IP}8`R_TE0QMIyLbIgU@fLY8Sub3;Fu2m!I2mvPy_AH_BH%@1_Vt7vOR^JP2TPw< zMgs0bz4*g>)xvM-eaW?tJLcCXA7@gmkk~`tmZbj4_WNo2TI7*bPcHK-_Zi%>VghF3pwje}PBxz1s&&Q#(=lb{I!nA(2oQj*Gv2)avwrtjZJE%Ca zcutu42u zZg_a6icM~fcOHd}j%`{08Di6-iZaoz|3Tr10epCMuMsbn4h)sMN8-+T8EMhGO!MTbpaS#I(=jGje|0pgMf3;C!PrYRcpi2E- z0MG8sBp=q�EVD;#Tcvdy)(GK0URB>$~`m58Ga=ojtL&0maNf6l6Z-@#AK40r`9z z!DKDRO8Mgs&olokM~le=N8w3uOH5jI9PxyQ#_8>yP>eL`UpI3j&+)#J;jZ`sX~&aZ z@@9TpWxHChQti)^sTe_6#2X&F0tkzc{U5)k6w+agMJ)I7&*OE&cwMd+2oW1Pp5V9& zQ$ZZeDC0DuP`2}rVW0{b=bn7Thlk{yV&b2-IOq6kbnwfbdVynirlLz8DC=+}w~SOF zZs`4^<<2-T9i}XIyJeHBkKG`hc}W&@lo#Pdb4Y1}AjO7Zc46@;{ZC*|mQe0}@WC2L z_BLSqZrB|?S*>HJN(VAFkOFUi+yHbl5JhtHi`^tcVIK*uQNjs?9~zQVfo)W+Ha3>z zE(EmF&CJiU!&yhU4K0kHp;oQIpHU}~?C`i5=q>OX@aY!Z#gDs|v=;s0VTfu!S5CHV zYDiDE1EFRAhw4wIH4$T@MIn9fuHS4;ASSU}qw}=0^1GWiOL=x>bapL{mKD_;E~5nb zwD9i)mfuNLNFa-B9yt{R^xgo;sKFj|TKvYfaf6j}V8ndJ4z9_uq%2Br<*3WLAT;!~ z4{6|JERr5O`o(5v6VTT{D~?no1Es7JTsfIxT#bT(MBn)5v-2ogOR*Fc*r$MX-x$Wv zLLZw2D#zLKiIuVe8qNr~c=G_?7!O>P7HCf=??QNF!)>W(XM3)b{e`T^OSVV@hRB^h zKqj^rBr2|=s5!ZRlA4a7?B@6@5c2eXW=75>_S~QD4x38mJR~Ivb4td z$}0MT;;e}!>VpVhfJn9( zj*!B%Vt#Xza8(pi{d;e>>xpGFbZ5AoBJx$Uk`Geb2EEt7j6mgjUq_a}EPY&Q9aPGau^(n`cM>2VstVBARK?{f3ZScFr)8 z^vYma<_Pp70$ki~@$LGmaB4xN!ro0~KxYuW?Pkr{iB|fzrjw+-XP@vorVpa6CUmnw zybw4kO64aD&P^PkYtiW2y%)9tO3o$0r)Z>|_{LwKX`?7!AP{8A$jE5L?xtF=cusNx z*{=a}?Dsm~OG3KWiRg2`;OXh(rQ8tT+S(90jtcb!4(H0^JBX*7)^R02} zg&EnOl`-uX9vCK`Xs>}lXzbmf3N;FE&+);Q#HXX9BMAjgh$EoH@R&qLLwo0|y$BQ9 z9he?3Acz7S#44j>D{W(u6;4wagCrWu@A>jQ*w?aX6>2e>*v}}_od`waX846 zg{@wbQcayBXgCEl0b<=l5&d8*U}ib;5c2v1C^wPdB{8-uS1#4!la;FDZbKl~dKunh zPAU%&WNONMZ0De@4Jhau#k6a&wQ!^)vn-3c&mnn3D|gy~#*#()bnc#L*-c2Z;E2<&87 z<*Aomne0p#UGrgD4H-gN92IoKaD)zTk{J%sMraKNQsN}5R1pqd5LoFh*KkR3;yxHa}fXjhV>;%Y%Y>bw(ZVw zA9PGfQybc?U$dm8$&zPAr5PrEJ(Kfi5eXoZ;p5XLzIAe6H@;q*>5ETOzwKRK5qV56 znU7&Erguh{&1IS%UNKptO%tGMawA?1V@w&W+w#u{{K(WNO|cUN^}|9Ob6$Ga>WI!P z8||Z1nONf1rnei}zVEJrn9Iyt3VaU4kGe{N`J3b5v&sNxs%NWcNNv3Ti+=nG-|4e1 z)Oxz?^SOY>r3BrlgZW%gb1`*Nl8=?OL_rYfkB`_^%Qy0%LI*kXa-oSBhvJ+IDy2RGjMDU~#;6W=qOaBH18}dQ@{&@M&=VEACqbvC=+= z78K{^T{(Jz)_x!2F)w1sl8HTH)t|=(Rl8RR1qQ8@cQ6`dNDz2|43!zw*3NI;T=;zs zov9({BSp$fL`jffc*1k(GPS^iXQQ1LsF(pkQ$yOS7wlRC&1?vwB}^&ZOQMKyO+;!w z&WSL?F_@WCsW9acH725EVJfugS9yNcf0(*4EQ(~<%~Agm|Hc|CYtej;K1MWhsqYqU zYUgrVQzCJQPjgcG?2K;4%i(W=xbCc|O$ts7Ws0^TzjM@o$FTkts}9F{C`zn>w(r#_ zkqkiM5e-3JCcXl8~goIyDm_&lb6=05j;%QX1PI*}>h!I-Uq|sk3O_7XO{*u`>!x7CKKW^?u#( zM+1_epa&8!;7D#3*i~Ut!AEP5j@?@?bz>XC$CyXt5X)*leaec8mbB87Gh`gwD~;Dw zm>+kF>*f*{Ox=R?p%u4wKKqvt*=X4NAr<+6hGeClKVk*|0;>!;IX+l(Ur56hpLVpl z8RO4Rntp%s#Vf2mx&P7o{@D#j+2YiC4G+7(Q^0Q%3>V$ZV@uaBiMEfkwP45epkRz6yPVaZ`9ePRNWT3v-R}@BRFq`~%+Y zczBY2nN#+J)!LhYCS#x(394Gsjzs4qh|2p$(T=b-!t2Sn*FqMM&QG0bOJt2P65#YhBXo>2^@E z;=eOu6VXL+>b_oJla?oGofP7b%c-fUkv!d>MPtDXt8(ASY=0@NjzvI~KC$^za(4@q zRoi5wicybhffX6CB#lO~PdwI60{4L1=eEp3P!$A_VK&Sf0hRKi>Msf z(dx9ux!Mcz0XUIka0;m1zMaTLp7AbAXHqbY(Jjsa7nHBps#f3xddEDIQ7aO6Xz|$G z>*R396h+p;$9-6S>gnpcK2dDvwF#rR#ZslWCqoa`r|RJF@S22sp-Xa;@x#43G+~OO zk{SqjcsYXZqCGGgcev||;{$6RcJJlN0{|WvPBx8&fGU0L9N#b^+;@D0FE!JoQ){D_ zHIY}ufqZ=jz2A&iVKO4K82*q0NCz*PVk!4l(#zxIj%0CZB%7DYYi9+3{K@(9~O_^B{tS+nS337AJ>x!wb9kvE$W?x9;>N-l| zW?w-Kn$dFjX88MH?BNY?rLXw)vrX^qOJBP{3HJXnQIsy>Ahd=2FP0Fj*qt=*&F4R} zlqcLc_M*+DVFcwbMb;$pl3|``PZ8@acjZZBUYUMRq~+o^vE4D7QV2=|ZlruXn#)4? zm3W9s_G6X9Y1rVA=vzQTQpJve zdDw(&*#;faWj^;z1dV^h`Gh)*}A^2GW>Dx*|8V* z2nYU_t_qVBN&0;mDM_}~=)WpM&olqrA+4+*WtaY!o5~u3;@CmSbwi~IM0bxDS0+GZ zc>wd>L(t2jDGCvp7jy5j1Nk?$yh$^$ZES4JGdiQ2B;)6spKN)_A5Q_z-;t3l&6xp` zX%hfvXo1xP)+<+TL%zBZ4e!E}mSc2=qfkoTn~JCKCw@R2&r5NXT&$=s_r5(NMVrO= z^OnZTwtb0go8z76oz`xh=Zifm^Qj`2G*VEAU7ql{U0{UYTkL*DO>+BU-IhHHZZ;+= z*KTj1_~0L@yMHB~{>M~^42Bd;+V2!NQ~M>~)RO&jRC5pSOcGSCpJ1_;mfWAdZVWwY z$)2F|LYF&dqgCJA3_O<-@`i5vrGd-&%5dJlcLc4Kp2)f5n@^Ji#V(q6q@XGKJ;J-d z3<#+T$*-yW!s;G@Fns$ecpLSBr53UsyCPij?K2#U0we(=J7JxXlnQZ2+8uU7rFYQK z@l>CaJtdgUOC-IWVn?5L(iDMB8?7D4cIlE_az@`9(-(rA#$E#Ov7`=u+1vd!fc;s$ z{_ATL3L12M8nSB@`P_12Zp%a0mjKvbaeC|5P}$uUS3m7uHymLy6tyU`cM~btt$B%RRfXNWh zid^d@U<#21wP6^^$WfT&fHY?nVTTrEl3vA4Y(2Sqz9YK#O+;I_sX6Pywg;yP$9|R5 z4~jHH<3{uLR2(io6db}T9_9?uPj@Yv3E5usw%sxKv@DzBi*+K^(p;k0;H%_YUMc#2 z+`+%z&QKJJj&+7$=!)eUSYoR=wVx#i+HZ2c9}4dA1^)48Owha~ZazVwS2nr7jtmEf z3Xfqa({9np&8E?(;}vdq^q}|o+}d*8|BaO#1{N;Nck%Fe-(`UI22@Pp6ZJ5g?1(cv z74sr*#}KY;+1(ridi|XxIVl8BdK|2&Zv>PgYZ(EX--8FS(@Bh${mD-ozu(E%snL#} zF+11E3Jt~sHBpHlN(lP#=P#yIG8}yT7l7up!s40Gd^-C$mN63qgBsRrk5nm0T1JlH z|M%O-N*Mr&h^kNtUBtby522T*e_e1RH^xN@n$G-`*sqA*XV?B{w=8`fwnwsW*2zzb z3CjZh`-w1P-yC>aMw+Z+_-l7uv|qF9RaULd6^LX}u{vvJqc6Y)fiQ$Dn%+$MBVHSd z-m;jZTC#F^^VGCyj%DZvO2>aCJun(#PDWOr?B4kWsC_L}ZK|wE_RZ+1((G>2iqR2Pf0V6*34)-k&6+f zjduDpL@nXX*_-;z3oLVE!@UF3OUbwJ&huP6#l%RPE__;OxxPkFSa|6jsp~NOluQZWS8qX$8Yi#S|?Vvc1@^4k5?6y!L#m4<2N< zr%DX|@P_JAvbfPRzpURm^*kyP=dvZ9oCMOG{l_n+EONL!!8y&2q2{e{gu^(m5jsZQbDVRjvNF{3BMV8$0&R^m{;7#*zw_CM)4KUYOPJ*nY6{bXC z))OFea^#OsPIBDVCisIyEqfG3zS!jN+@>7l;{q8=6$Umo{;tfuT9jX~#zI4!J_~_- z=Hqw6aLKg;7EnuNaC%UfY*53-M6HMJ@ym^Z7(NFQ$Tp#Mm6*2n&_T)F{jCv#Ze4|-x@YP1YV3;kp*7`F0iY-Me?g ziPiz@9S&wM4_gb{+ANZqt_VyKVeM;a(LW(ngzG#5l$ML z`uPLU@->W0UG#iGr}EdwaU+Eqw3ZqC{m$QTD~V6Q{hzv$PCvsSeV3!|b~T`^R@b~^ zr?nUB^HH&|fLr?9=qZQ#iou=#NlL`e(92pwlzabOwXUGCYteN#TV0v7L2D4f@ECXa zb`jYFy5SHh4VTnqAw=@~vvsTn35o^iF-C}3P~11L3waLi zfI-uj*EZ8k>_5XosYi7~!<4R^A*D{Y9{zBX!SbUF0in3_LV6H-v5#_tCke;cEH}aq zSDx&S@?SuiGQRf?#j-o|Y3)7=7XItnJChXNgJYn!Q=;Wtf)4(#QlxSv;1Vw5SJHnS zaK5;;vnU6v(>|z4LY6T>m%`TQ5xDDTXIkMn)Yg7lR9?Ormi66rg^fNRj|<$Z&;R2U zluWIy)F;;~zFQ!t=V7ye{i=HW!8l`25%Z)3#o+6n;c+ya;z%JIoWe9nR!A#Z+SA9e~UmEG6%yeFBb^=Ibe_8EFm~ulG)!5U~XA<5ai7ZE5qY)n#a$ zExpCj``!M);rn9>6fn1q2Q0s_zyDG2n-kd6c+ech?6}ayta15PrMp9wU;c~h?!XXi z`=d&&@(T&wV5x^4bgrvG7`tNne_sxJR@sJX7GmSS0m(5;43bTfGE`D=#si#41}QGc z4-Fek)k+djtjEdRLc%UQ+`n1zN&Njc{+yBEYc>@s)#YT9>DiAsI{CxB`DL`|IiBez zL*XpBXpvrYs-#qx``+mb*6BX@{!%Y0h>1T?*hJypUaOlhaVGv$`rrQ!t4zRt68gon z@)V@a4a%`4|Lq0v*LCj+L#d=5DX}YDI)i$T_Rz;0gPD^3T4K7DZmRR;GJ?eWq z-GymF@m&fN1H)o506<`aJyTaiH#p_*fO^RCE=NMlVdly9@~|!3=F*Amw`rL za!;1|_v`iFy*kpaS46|n?KZ1BFZDO-`j%Zn@|#wwcuuPwM=6NXDY%nhy|Cxe+F)Ut zo@NA^4tgS<^PGe?G!Hq~k_29!*~9FW=S_@@OUlu(5^|C(YNrN^IG62?puDt^Ye{A( zVQa^*pw+_q#sko{s!m+o)VhF%^7AoM;eEh*`iD@&D5Bjq0shZBmT{bWMse9dsPW}9 z&#oMGN-H8Nd`VE3y~IQ1Bqg~z1$n~aL>UZl`n~@F1O-Q<;k&;-*0W@k%)_x#hDUQz z)n+M$st*)OkB`3sdo-yL)Sff@>L0_cv|;Pfw; zms$!euZ*`P+$(Fb@}KEjkJpQ686r9F>@Ih!`4$=~6<9H<|MGjYY}%(x zi#E5}av79@=O!gR`n79k?IM7~B`dYUo$`9Yx%)xCN${Trn-yscaM>D9|31;FgHR}r za6*Z-oH&}N`Heq3P)*nimfU>2dlSv9|8}SaQ7;QEw5o|Q6)@G9OBJzQoOgC z%kGR}`;ul!W^hs z+El=eLj8fl6H-+sxdOv-i~fc;QEg4P9OqlutF^ajgLBvSzNhk*dyk@^V4?kqY1yF7 zHbo`?)A_uxuuw@tePySN;rHc`ZDb+Y`fk@)p*%tQ_wQZ6t=v`mbYZwqwoajAYPdfA zn;n!%&l&kLnTM?u)KZ?#VH)-LKEQh#qzRg9624%}(k0^}k*cM5@=5DiBEuHdi>_i^ zeQX~Sp(2=xEn(BNEd%<+FBC_Bu*c!dV*0q&qj&NEq1UBOg(GCqRXgz@4wPb6aW7b& z`U_Lz|8;Akf-vqCop;`R>p=G+UALsO(WSYc`}v68fVDo`odN4Wr<=29`NLuMy9OeQ zA2L>5ll$jgec~_a6Kd^w%`vbnYF>QOC)%+$_L$_*EY-m#_Ijl2${QcsLF$Lc;}}`r z&PX~YxoBO^s0R%gjRsunsZe*!4rH>zZpb}rC#QyYlZCDIit>cCGIg}yg>aFStRscA z@6xWbK~@>ef#}yvUo0zmre!vu>zC-Kr=ssN1ln)yc5SrOWkb(> zxJk$R_3Utoms%P^u&MavmS$dp1;@*q3D3#d)qv=pg97knG3Mjc+S(duL|j5BI5R`M zIkuTq7)hw8-agz?hB5zqwECcJ?7c4%X-Q6lNjQan&Z#~40lMrF&Ba;Rb6qlCKZc^w z$-ECARF=q`c(RtyLx#FpC_g5Y&HC9-L8mr{B`1p4iQAPi_9Z4(ffU&wk$Tf}yZL)3OGN}0h!0c0keNWd+U%pHV1 zA!Xb2T0uTT?&%z?ZHn(a#izW_vC9pft168C95}Fm896ngF9PFQRa^yXWz@n+*tl@%Q_{7(SeZ~Wpihs_4#b@+}jKVBEsh)c+Q{et%pD6X55?b zR@&c}^dNXzmDM9H-Dp?vvx;_c3NO8 z#^bUJ`h5w3`tgVJj$xgo^p9cxkJ;%Hi@)}UmefuxvfNmX>s~c_?g(DL7a&-$yaOwY z+2G;A`C%BkgcgL@0GXTk8I<>Im_aweGpre6(79owAkXug1TT?Y863Q=z>)JnksbI6 z)1aPxESH{|F~redy%9JRhMqEit@X3iw)6#(E9x*+-6)Rq9P5S9VfwE-^|kk5>K-Tp zutWdhc>pZQhCVvwPv;&*ep(v3EM+UBv3tDn_NvZX6^C5i!s5PyNrfo+WI41~*HBuI zR~n0L+Qbt^x7*g(VWd`rR9DOpZ-vv1O*0(#grb<-p3$k$tn`hvj|xf(G-{{nc?&hp z8!;(@J=m!dUYb!{sGMl02VMyb-0`POKe%qjjWn8HzdXCX(Xt|Wu28ks6qkaS<43XA zY~#NtXfiGTkdtp8XI1{Gv~uZMY0~lcL1%H!aeLu9r~ZfRiE%KFvmAd2w9-3AN6ILt z(`38q>%=Ggrj0lRcuXp&>(COmK~cttMlW{>&7}h`n9^P4G%e`^1&ET!G<68N&$!12 z`t@9+iZ26ieni$i#$NJGuZ1+cq*|65Xf|}--R2jwpYm@9$r-d(&Y{l~qGH;6W6%dX zhA=d##Xgo;?`w#>zr&`nYNwv#i^eE=A)Ick=1!!Yxi_2eBhed}GOJ|U0|gDFtJ!CXFx9d=}Dl{6J|QsG7ANo zz`!%;Hpw2)nTUSf1|MObyB}mnXt4|kX{7Ekeiis#Pkytu*<8?~=$;d$Wcl6vtt+6x zzE>QaZ7m?6#Sqhfb1mrtumJ;I$N1+jbzNpLtme+~bez?gWXevn z#w09c?2G`a?h*EoRD2hpu3L)xNWTKxyT~f|3ieYE=mCW{{cCFc!isbCAzwpH!|Ptb zbNgc!r7%9;_kypm!$^hv3iaC1aNT(Vx}vy0xNOl3fAWo^lX27*#pCtyTx35^5_>~3 zTx^pyl_=~oX2W~hsFV9>gqDl#xZ}zN7Sh{g%gRYI<{wn zIfY5T=R*bKx|t1}e3wAv2;1*$9$}SLQ6H5O^##ZMD~dUdB2QS}qhv!hI=g7M|7O!Hy{E!$(`1uiFZ&2$|ZYKHia_a{>X zpjR0v+z_ul8|pHu$B?Si=3Gy$QM9R_MA_Zvc6OtqZjUw=i%7p(2CYy{7@e3-{f6l>EZN(u2@Y3zm88H7KNy^$05xV?;6mS|JajEp%6Wu2hIN^B77)bN3m$%+?IbHY*H^RfHF*T6UASNvAP zxVFy5wu3QJOSdYPE7wFKC0o{yI<(2$Q=>YsT=QA{Mk?j|Qq-vR);n9ygDR&3R{Mvp z>;X^6hd!A+)-Fr4xBX>_kts^gr-OYpSL?s$x`0oB7x)_V%>M%rGbo}=@7Emsq#b{e z+t;M1=C!2tQb@J3vJ%15c;eQOp_OZ=H~~J5*fyfC2r8_B5vGey9yWm4^9ukQyeW1W z542Y26=+Ibmg0k^JMHF*ZAL!jPA#vj*w6!Q8wUr0Iux>4b%W(Ns~8lBuzk|zS}({r zap2`@8&2N(*oGl4m~n;FT*md;MHExEBNM~hX)eznZg*Cv2AXz!;>|WoJ-Wo^cYl8N z3fZ$zl+B{fw4m%RJQC`-1xBFt&Me}2m7lVQ45XsAJa=O=?| z?S&$T6}&R{>6HB+d!6gigYS~fehci7+lz*F{KbP(^1;M~zaQvhoH45>z3;((4cz@Y zX`%r7*_kWh(n;rokpl642m{3-blfc3wUoTZhAddfiw!GIELQ!nOm^oX1tJ1OJTfp% z_PTGDIX={jO#=Z}RKD+GU+#zuFqWIhMr@eI3ciet%+-*`JVxcsa1G~RJK_V3^(CXS zXHc>9?^Q;@Kv!}yB#YLfac`~QeO@~xE`&n?{bqHT*?=BiJ)NihCANPvEwS}ce#h(f z0g`6uE|#%{&!WO+6hcH@0toC9e75VP|x9*_A|$ zF;W~3W9>lAw1k?y<$e~Y_;#IA6;fOr9Fns@%<%fV;Ui<==2i0~rS8*#DNv}P;bXhn zNb1VcK9b@WQ28j)eM9G&UK5Oq&KuTDj*(ot}k-^{w5&#G^>>*0WCk zvlmBB6QE^)%(i7XC#v?l8;_#FnEH&s(pI@3)W(aQ;pBw9>+22D1bA3PbPKMz;CTm8 zWRzhIr6AGJAofbGl606+$OT1;E>|p{B(;x^k6tlx!FoF}F>yjs5ua50aufa)oXm>^ zK~>wq-lqFr@m3_vv-0vGzVdxB&iC}bN}d$(NS zY0C*sAb5DHueZRG8D-??`7q%t=1I1_iA+nXL;^TX4NkN}A@&a@--kml2P*9cScwpR z^-ADjNbp+XC`-T`n}4%EBCUkQpH`qUUsk?w5+PgHM!OzO+BoU*=6(I3Xd%TV!}k3< zl|?XSuVhsG{G!5Sg7oTCyt{t`gDSxoK=#=Dc-z#rJ%nMr1}hjn2~~hNg$WY)dH+;O z^m+25%46&G#{DZlDWAix?2x;q;qCPgxUY6Q(0IK$N)fU6{6z#KSlt7Z?iR88JQ2ds zft*?eyiQY+w`tT|b(bH03Cq>-wQUI=K`lmgS(EPGdlNV zz63w^82o{5X`O-w!m@Y!S8onpI%q@o}8*wl8TWE(}&k z3lCkek~04gx5ghdI=UK5-#B!5rEm0SzvmqpD`F+aVE!-CL_51>);=);#ElnsPpi{} zuget}f0@;|QRY0Xrc-iNH+Qrs_u8@drzb(nnthUV3TO_b%}Ih4&=4y`;!_q-oyk=5 zFf$MS==njylA|0;N_3RTu-3kbE)b%VhT}S!XnJ?bQzR_ZgwKp&NkqwhAaw_< z?gvmrG2FL!xOQKJbFptKpv?z9e$ZctA0BI6LBMjt6kw9Ty}_-mO=4N7*hZvNec$Dl z(QRe4Ux6@c{F>o+c=2}yS7q)32v^T@_D2EqVefRAug&V$JK?vQQ=xwQ@jkLlUQRA7 z;oD$1Wi6&*2O+OZC6kBejm{o&Ns z7g)D=dHx+>2ShdnJb{lNV`7bh8Pgv&ur)_XC+qY@(3fxasPkHm2!EWAtWWm|mS8R1;W;s^*nQI^a94Ev46;~!y zEf!!I-bj(I2LGO8nG`{|H#YouY+TM=jHokYKZKj@%hql8x3kaG)#5L4ULoUJk1D+R z{v`u0b!dXry2^8nnF_b|fMyH`D|Qs4uQa=cSl8$~do`+G(Y^OhmN}r0HHy73&9^&P z_$(=%{KHD;=MO$@;$l9n8##qOTHNuM_)ZBQ#3?f@nG~9d1z?cnfi%{8^{shzMth;* zdrh=krfy^^`e*=;2oa1ssukaNBfzi!df`SLBN?97Cat5?CBv`b<;~5>M9Ehp-00T# zBh>|{U;W5i=zLYr7b8exFnnqiSAR zbhgK53}>B@e3h#^*0FXpKIGhh*~im~eSDr^f|3D%(3Vd|*PhyPEFCbHQA9Wh>U*=$ z_`QS=_$Y2~wR?=JU}EKnQfw4pC`|)}5eM5p&9E$V|0Lqe{RTXb#oRX-K`jLsjo$%t z>dCU3tVS3Rv1MG#rXt2>AZvv@hY@)K;zyPLYiuA@%t`5RZ>=yb&Fl>b8;w}7Wl7UW zu9nBZwm=0PT9zvvyVk2%P!unnfR6MIVu+#PH|3};aMO0=lPU=z(e-n%K#>?G=)?u& zz_#4IWE20hwy$CXds{&CsXX_WYn-eb-nKJQ=-U+N;Zjr_7m&UieabIqOV#vCkaBx% z?Q;!p-23Fg1-DaI`dMF@IO9_LG&gL7-i(-UulPqg6d=Uz-&OjWi7W7D^;~dCjdKD=UJ=KLc&H)yJ)sSkbF@Tc35l+CwUf zKiPOJ^l^!b!CPmidom!hRk1`?%VA7ZCd@*d#gJxNiTLr?6DWYT=Z>gEVSQBC{VN)u zW>B3hoR9sp3{Ez~9~66|rk#9ezCG^&;b8C$OK|HSn3x(TmiGtM3SR7ygJRT2ESjfq zeB4piPyuQ~_33q8PBye1B)}EVxRy@5tY;yckZ*%Us z<~eoaKh!OTr2Yv~*&v@Iv)~v>#SHO$@-Ddc z#R=G(IzJn_2}`wzX!Z3ac@@9*Sv5p5Y;yel3pB8KGZS$-w0NiBYx!2x7epaqtc_(r zEd~nn8kMtd&36u}pzw>Az4dpf1^nQ14uE?!q_sDeD;8d*b7fn5f$4=|M4g+cg6W+K z*Y{kbh!$Kp!u@Ea#AsJ5LQNq&U?`k8#?-h2j`67j3{Y)zEHCxta+HNTt8JV+t}@Dd zK+0*5?Y}(YqSA4<^y}K%#?vNW26_JwpGXgiF`U099q4=qgM6nvl4l}%E~8eC!E*QG zj3}HzN*Ok_Sna|HFekt@K3^w@$s0t{(YKN$Eg? zWPhiw#`yHmscM2a?&!JSyAFmUF(*&e)Sw`NNu1AiG%0Mpt`V#O+ZrgXwt|&@_t!8E zNjMY@8Qn(ip-c_UYtjx3*q;OiF`Svv6KAcsrUvb~S=98#51lyCAwTOkC6vZG@W1Ql zX|`HZTb|xy^jhY}&Z(~Q0|Dn)P<`;#e%RD|w`x~?@R|ng_BqX`pl@x~k~zr^>Of2k zA5}RG!=k0$au(La3C;#1V*tOry?v4E3NV(}UJ7wa*T;-u2aVwD0lTOY&gYK{N256vE@fdu#Cdll8%t9Sf#fBG+Di|2tmPfQQTD zw_#HVq2-mO&;NS`K!5@DN%oUlZJ-b6r)6wKb8nimk8clpV+=&7FzZFhT}}ks5inPZ zaN~HD-150Aza;K>$>JcbIoOPAcjG>0>#=yF$pD+98WA9NMWlS&%Oz(TWswmkOC5g< zuG!pZ`_(3`=){uu&eL~b6El1&ASYX#pm{(+!sQ3sM+Q%|Pr>sDOb#!@hdZ$y?eb6A z0DL|TmMM&ZKzn2LjXm-hrq0~03$Zd`=XBJIBNaEsSAZ1jUULBft*-nkB0oCMYy8g3 zaR)tUOy&AIya*P~fgsGuWk!;KmGLQrV-qxB0vFPudf*-!r?AhoOJ59%omvdJ4Nmp8Tp#j(oV;SO^;WSYYsP zsu;e}Mnhz0Q~m;AtEzW+{|5-H4h7zT#^>+@2Jbaw)rQyLSvW=yZGfS4Sx(D|&CV6S z-A`Nx(^Z`*J{x{9gtSuW8M|9^-x<%YF01~YbnlUtCbJX{hX+lw!IK_nrv%k|g*$<_$IRaV&t$Ut}2k0BKOuiF;64Zx+*z(rj4L!WOP< zF)=vi-hBX~yu9@ORE4Yz=^Lv?UVRYa^aoccsL~ zsNw{?*U-_Ox3{)lz|B$vc+RlH^yUS^dT<@f=%xa#n(fUy=HBcKGQO3P;Cyy7H6M+D z2-z5fU>19e#*d6ro}XU(*jqgNaUU$D0#!)aHB+8-<2PcdWZZ(ulFH^CQ$XLvB9D5z z^6bdSgjj-0DVN&j+vd3y@eW#Y)3@@5{Pv3(W=y@`yn&5La%9CMjcD&*8`jjZC||W2 z&j$)To)EEH;sK1N8Fli5k$Kh9tTM6M^O%Qg0%*@G#<52wUzP^Y{nq)h+7FT!1rG-& zeCE#8%a2#$3ApbH7Nwi8*Kg^2xUYm0n6L|svGXG;jC(cX>GnSi2vP!qgrI~XNGKp7(jd|jf`Bwi zOG$UCpdg@hcT0DNNSCxUN_Xcb-nD1O`JQur?|+WhH7fhr&wa1;$r^`EZSY{%bSBZ% zIqzq4vGVUH5o5|TR5c##kLajgdQ&YT>KZ_@cJ>)(17avjhtWi@e`!iYp*nRu_c-15 zodA8F^-Ke*$O?v$NCyU1kHUh(%AnLtEU9T*RuW0M^{DsKTOYt#vs>#&!$E9T2{3^8gX@ppBqF1* zI}NL=Ir^ns!H*pN<*~4^CyfDB@-rG`s`!_%KOnmWhCeuOEW8weR|W~vS3RBL7i_Q6 zq|y^)IJjlx9q2EQG2#Qi!*G`t>O&If~ zdKN(@e{q#2cz2XvuDmvX*W-&>!Kk1<47rX#7#gcIJD2(Hv}#UGO%)!<8`Cbl<$j1X zK(hlXSFR*1g&$;%rAScWuQ+4xt#47_QYVUPPR4&@jqK%OQb|j#$QQg`cZP8*YQRAT zZ%N+QUE~L;o-(^=pP9I71AoOs0)|CGzbRr((O;lXw!~}lm~FvRenC5T3Ue1SGi{9I zG6vXoxO^-5^)@9_P|`q<@Imq_DQUWkCHM^ky0>P7giYv5}AcSf7jCvF4 z+r+U?MoP$5l_ z9|lQ;rN|SECjyVaLoQCUwZA_!hy!-P5jXX>YBXM)G;j7h=(M;jr{O!oeT!p11 z5K&zBDL+YdVO;`uLpbm!*owrFXtZ7+t=v`Y{UEtLZrS&%tRWr0YHZ;$<-UJ;b9%#kvdnpOd=6T>!2Y8j&u<{pM2}`X?ulc#XvzNyf8Dk$nxPh{5MRvUg z|8aZF%cG=BWbjg4b?QlJTq~&BD2IJtQ9i?Xns4jdGEQd9jf8#-A3QT+$X-WS#it5; zp{Z;fLE3yPc@>rP84|a1qEPNR1ba@lL!Bl6-RtC$-h(*lkEe4udL^55({n_hqdE?a zBWbwddDHw_`U*^=#!X1@l+lx&M)H}RtJD{lcjnq>`2(~;d#44;;nGv^w^Xh*NfSxl zN2}bO8%Sf(!IvGT1HDZMRqC5QPyM)vnJVLE*b%(5uS+{bmw}N^tSDcXAh)|xL z7*Vf}KUUj}vnKd_=qsW%4Zpl^Z6mkcf!g%!H z$BE~)hn-0t2hzq%cs$N)pF*#ElJoi!%EPLK^JSp1uSGo|rks>IVL~b@P)~|R842t& zMOOYljXL09|cI-4FBo5$~LeYerSHBVRBb>_%@;UMmFOjpW~D~&gjzNat0i1HS-`X>nj z71hf3_+bi`Eh<3knOJ2+kPx4ia*L7}j>6D zA0y@HYfj$)fSow-|C$5}3~#nW_{o*?&wwN}UfO{%K-KqK({v~l0A)IOq(5a0*(QxY zQdkpBOngX!0KY{+=x;cYIm$y8;K=^D;bdW925JuPo4e`AAbo)4vMPT!UR@?8z4t=* z;oW6du!D<8I=}noI|q>tuZMH8ZmrbT)|TS;j&79Zhqa&5GBI^TZ-Wg%WmKziYWkdy zl8VX#ukf&t>6%&t$47Z-?9*z`LhSeG{lWjZ#FB+X6GMAnG920<#4lXr+UjoKP6j<^ zll8iKB);iLVGKHYxKtbQnhctWID9CaqP_3`hiw0!mnitHrxtcJakJn7S{f(uF}Pob zb8p?P%=+njn?l<7orP}^N&FtDy@%b~&n_MJ%7ih|DJbS^l*^sNz1i)AALM{_&<|9~ zoc6E*~EP>)c2`RK0a=#-@IwX64bk{*ZMZVn7NUNlr-rq zX|PoUz9x^6Uz45G3i<#4ugQnE4L;p2^8@*$l7q6XTPNjXyyR%nD7BYGlcVVlC(aG9H=D4UiO)kJ%v0TNecyyk;Bh0IbzWrB9 z2IW}N-gf6AEym8`mwxfUL45N~jyyNYlPzx~)=TubVMBH;c9H+qZlv4#t-f^|kPK>3 zIn5Cs!^l~>3UR!OPYXSe*=LY>F^|+blwN&U3)$w@8xuUW?f_v<>S zp45c05ufr{>vIII1rKb^9qx3nJbPAt1;f39Y1{I;-|2u6izJxeCSBxMElv80E`^<# z9NejV^9p(4zpzH4vHxS0`0GEZ#C0#trR1$ddce|c_byi!R&Zs@Qf#a-d%{5nblb49{^5|4x zO$QrIL*YnUh+k#A$HGgKsS2Ok8-EG%u3Dnm6Ie0Whul`6xByYjFfo7`r5y}3vHZkC zSh@HH_xYESx&md&EGC(}yu5;iJkBes)Yk=E4nm>4N<(xLxt<6d_C*4VtbC$DIZ%L% z@SFF#-%vb3WidT53__n02tP1HHjSyRziGL+@tf#fpTv{PPaX(+bS>UnP|uW*#&94? z*q;apuoQ?d2d@m$nkelH2(e7AdZ{LU>*lrod&0UXBS7T%x6TO$M2ujBp)6hy=whq~f)tP$cO@3RZDHls5_pp!T z9yTAWNutF+rZEV`P9WOd56!EKKh_}c$d zk73}#@|3_T*r{q&wB=}V@A|_f#zs4^U8E&?Xj4|ZEs8i<{UF;JAEUJD+1K4w-#PYW zUGj3nbIbi&V)jfmjEs%{Xk|5F{`rsNM~g2pk+0?(u|(ZQ?`R=PbBN0e{_tUmt@EHx z;R7kN%2TA<{di*C{9uP~vqsO!6%!r(!(=dV0*D7InVtQ~q&{bpxQ zY8l|r2H_c?gB>ga_bIeT;WOzR&-CAN-a{OeOq1H6CtR@_IhF z|7hy<&OxLJk?Tg$+E2F^=TjbzxR4|yYB;rTb&8lR0FRZPNL9KFub$N9D)?!i( zl05fvB+P_X6yZ_qUZc6I4hO?N-b6Ta$dq@`qR#h8P=~?1nE4y&Ip>8q$ht4c)D>sA zN=O(4wc6Xh&??`mf`#Awd@e-(`LhWS>LUe%&Uv(ECm_Y7gCH#bv_0@UXMA0Ll`G1L z()BW8@TB^t30^~I=**1nJbGP4eOsHPIhqhd`ub&ph3m1K)tiElR+*yWr3G4Q{~b^sd~B4QJJ1k>}rjM`7V9q)6XTaVcpXV#@NwFSXqd zTJ*xYly2mzxk@L2KyCs^yaK{fR6t2f1JaXlq}~Lg;8_9HA$?-)xp5Mj2%T=yfaP%sTpC73rv$zg=>XO9t1d5`Cj!?2-=T<^nSKKM z?0f+C)Mw-*S94r=4sOvZbd6%cn+MyhFJ4$5&&hR~TbmO}J_I4w-5b(!PyRWZ$%BNe11I;=JCxs0SFzMi zbA9{utCOWpch=tn6^K zco#xWEks_b9*^f67fSJHy$sMSoAty$c>J-=16ZODTPN?$4kshXSZNOuIEd_mCZV>< zoAg^2Ex)7_GpPV(L!vu*(HMkiiJ#wdA0WY3x5TBEU$5m{Ti>;oF1u}7D{_Zz`L#M9 zP2q?zxG>l#NyD8=W%`=Ik?Pz$07x0xIgsg1-iw+= zKm$`@2n6(fWuKblpE-k$08DdIf_S}7jFPtymd9K>`fzr*?H>5Kyjjz{&(H2!;Z{y5=bvr(i--`Vj*yHW_5 z^6~T|Wt-K%$2)lf7(gC%DCdO#vakn0C|Uw=URR(ZNV>H~pd+=^UE)y_5N+Sd16i6x zq<-Vwn!BxCH1l5Abw`HYiYJKb9Kbmo?GJbM8*nB5dLFQR+~M}xG1y?hef-$+j>*X| z!C-OCR{6P|@YQIcq+nW?hwbtAdBudEpo^Z-hf*W{3a|PBXmgL?l?gzN5f$%WVDUH78@Y?DLPWW^$nLD zmdeTe(Fb?#JQ0}Nc04^@D~=pEy48>Yo094glZM0$8Waxo$Tg)#M9P+lV#kt$RXH0EO%sn9=$$ zN+52Zu1IZyuKN=$B>*yTriM6k{#Fx98lZ6&OB>8+2L7uUFvo5XrPd}w#*vK&Xx2DB z8}Gx^Mt3CFt-Ef;*VaVwhM-`ERUy$ck_Su4+5dpeV{58DW*$z|c56gCmwb_#Psrap zr2cg@G8V1UjkJP##E)ahdB#x?w{Q=-gfu||t1;-(%bBDG-@E*w zk=Jq4zGgWFZiaY1Cc#9X<{^xy#<9x@$b3_Xt*G-0p%KsT^h)axuE%gBD^i<9SSRrC z81m?BXl`LHrF!~_X81J z^A7}`zIA&ySC0D$YlbdpGy4U>GlIlZU&R~VnV4V|Rnfy$BDA0u0>v9Sp#I{d!bi>y zXw8pqkKDTNRov+1c4b(&!u940viiF}Wx)S*Ppqc4zMB2fZFY>?lntgr{QR0cV*%n@ z2sDElKG91y8@zTaSnilfKZKBkfm#Ec5bQUpddISrkRHFrFN z1@a=K+O6cboP{fd-YLF2Y?-{*_Ur?hizCnPdyb>UpaTC&F~yRn9X@058%`$6eZ4=@ z%;2}eFJ8R6r%n+!`$!_tOtDx!>W;idt~H}+U?K-uj6D(Hvq-^?SM|6>Wei|-9~Cc!le1o^KBRj|5kelA8*&2S0fD-Fac8Ba<#c0pa08xrNpKA{G-z1T+n{CZI(vMG;X62 zYkR`@#TVMvp6l^NYV*f}>uLHQe+p=X6v-pwJ}daJXciwm{xs`?{cW4JhUY(mAlT7> zNaOgXG$-&c;`3b!McP>@3u#V^5_GGg*^FiO@BYeAR-SmhiXNk^VFXq z|C|H}g^r zOZ$ng3#ZjFKSUlslGjn^m?R?Y9am||D!;xt#g6$>`|mT5Y~a9}n9O?~zkQ_w_s^p3 zMNIz&lEI?bUV4{yfE9a!X+Kv^9Q85^()dc*VKUrif|3@%C+Y z=`V9TJ-;4Ga9+V-Qf44@3uV0}hJ(n;5WH+LRi&d%$*WDuqL#nAg^7HT?z~62T`U4h zSC8}4)Thmb|6Y1`W*(p~6>dHV^1ks$wcSfwaMU_lm1RwmEnVq1#Bcd_d!i36C5&JD zx7rd$>OC$T13y)dgIjoCtPc)w132)JF@avKLe>+i$nHcK9RX2P%DHw1yQB99v@J7H z7RgV|HCt?I;0vZ|>(tJWqs&sxkFNobx_E^~{&gF7z)y?pR#kHAt!90O&SB8=D=#m% z(*jRAIfvmVJ9E+kkD9#`xDw_7nzmll3- zPW@G;03vJyAn&|G@f_2Uv#n8r+pu}Oj7?27hjt_qaW8_{_;v5aVG!**OsYX{4pNmY zjZXi`t@$YCR<&lu_1o14RNrGwPH%7Ol-eQBTM&S>B4~d)(v7 zxMH=GmHt_&)`{us)rL(LQM{&6nw8G7by53Y(MehFaAk+v^AX%Y0$VgJXh&0fT^zkj zhyE~7G!MnC|KIcf%z}hYS*WgY|6z(vL_rWz4#ulVrh_DXAMXj`In}-G?FXzOwu1I+ zk+$n&%0P-;*NX?PU9HZw@)ISW3YGiOOJr;65dzV=fTQ>^vlF~#0VLvQG}hugqk5TsbB+^S(?onM#pVPj20BxI3{6M zX$xgk8boO@bFe$_##5dDlLY|g$i3!C(40|5zfASKBu)U@*vu(WVE!miBFTDO0EdsC z(N22f+3?~OFwiGYafump@n!mKtOahE5&0bgSAs0tXo`pBmH830OET#GfMgKggCos` zpL5VX_#=-^!3k#N-yEOZ{zerQBR%u;BOG1kKTp(6DOTyf%+kE2;PWZ=_7@+Avx<*G z9Tf!HpSz+$sJaMojx#f3oVmsDSWn+!o!9s6)x8d84GOB^I=Ml~d+0j^YY6&n1SDWq z-0|Gh{ma<u4oby9nl;Nvu)b-V{ z8xMNiU`zIF#JbHX+?b%yd=7bzH$zV(UAAF8SyDd*^%A^zGivA^VT|vJNM zGFaPAx?NJ+Yo&rZY-bcc+ocpbt!cz*5@)NhFv1$yR*9=R*H*}Ja-oTs=3((EfTYa^ zJg<jKq`>fwCASrykrMnpu zJP%LhXz~HZGm^D>?#v5#jPqbFP)xxu)^Y9G@}_{*(f<7ohP0dWYOC)}1oG+@9wT(D zuoFFY8l~sjP%p7n3MN4u#Vn3v)Gy=xy1?KHx%wC`v5D-T1VJA{v1qe}1J_3T@rhLd z*n6rwmGco9?8tUdp?(#zG|St=AAPiX~YuPW7fY|+Xni9UyH7jJ^52tI5R#wmhWXC-6$bmu;Hh(Ez z(Io%Vy{#qtW=yJ!1oxAy(#wjNcOoW0??iDv`FurB8&a3Oxa-N;seOp;0}o1HenOe& zMo&5;w3t?d?$-O$uGc8X$jsT$*=W%o9dhLp=22a~A&CJ@{~g#t`}4H;ie<86LuN;5 zC^3?Wg+JqeQ_BiEfta8+B&5PvQogX0?X(v6zwVU-^wXVV>9etUJ6cSF z1jWsx)V0_9CtB8fj<(N}Fr$Y;+0*>!U-MqaisiS{Zaq11e3q5K;0JDn|NQ>m`TPYr zk$*{G*$nveu?YjQY&7rkYD>5sS^uXY3YnY?b#-;ar8Y?*Dp#y^w##k7HFXqkrQaHq zkaJ{6(UJn3K6eE;q!;t4#5341l=ZyG5RP`9JCGc01o`8bOp|MbFWQ?Q{p zv*>bQ0x?nlwZ}_bdqljFB!#zk)P_lL{i^ah^6U!BOC3hEe+*43H~Q4RWq?qB?&Vrh zPn6|bb1^nLhK|?zrv}SUH+MhT7%lX4)z*Lfz+=-(96_q1^9|V{VDtWWOEx0{e5S9) zmWu7lt>pOCk$~v&J5P((9mhr5atZ`)grXeOgN9jtMkyi=S|w}H)%B^qNG}>DNhd2Y zAsZ2e`l9IAAgF~ItZB%)yKirCau#GzZ@a{4|MjL|EbBGFXeQRci>|eRz`)NB$l^AJ zzZI`!=$z5}_H%SgojE(~t?8@(cR>^?4Ro}iP_c{tT_;8J8ADM?Del*5&Rhqn+cTQ` z_q#Re>FLFxSW9bx4uVgtGXfC)mwV*O{nFDM5^6W>+n!GbnM3fz(y!O!`1ttY5NpCa zG7agQO8LfO1M3k7k44n<;9AbIS%niJ%CeP@&o2vRnx4^W_cP}VZA~!_RNJ+s8zS$E z;$@AEGgDJjBVWFzIR^{CeGWYlEBXV$v(!dp6V2%8DEhuR@i5j=fw1f2`Yg zoT`m1;rtErH1}7rJnLkpEpCP?avvjpmI*-V?&z!VV}F&-i0R(o?Ta&Yi<(`MsJ&y` z5T3hCMR^l%x+{}8)@7JYF#BbFJoj%nMDDMYYRr$Q$;@vXO-DEg9yP{NP?Fpti?vx( zdXlCUq&IcHz+<|w-nX@RXm_KhpYJ~!U|fZ11~E@0$^P_@~vQslM{4Z}hY)mjlrwPkgSmfE* zbqgv?rsWh?)!#`X-AKS?$g|w&t=rO5ni$ux77c#=4tpB-D9 z=LeLIElPv?kNB}x=a`FYfogO&?rVUo2 zpVQ%oGkc0LJ)o+da^01M(lYtj9Q3q!g>KAFimxuZ9-!|?mT{6%9uGULBQaMUHL@92 zGx~V>mu2NV>8`g=9hMcqGVP!_VgMAUZo(b0o}9qU@OA0#b@W~{{G_Ugl%2__6(D|(-}I_t%XFZbs= zVDi`*vtr}gxSc>Dg6T9#fzg`86LEF5XzAXCnd+3(#s6p@8jMBpI*Tq|ko)KN6mZeR z>Xq}SU*|vdvvcEVU`b2+Qa-*^#UzM2<6fJcM-5t17HFMBHaiAY+nSl>kPu@%KyYQQ z%A#(A^nmXSu5gnid5lMxep!QGRplpyf)t)!%O6z^>s^3BI0B-c+x3D3=#iF+rBXYj z&l9OtVTYtqCs38dE^6I;j1)gXYibx2A~xO}j=%sDBg2Ur1`hPzrVG2)#Q7zVBkGRP zgEsd`B4TfO2qK;^e^sFDn{k&CGisH0on>h+GF+38AUW53_BPcqYO5uVE^ejIy&HdP zEk#d&*SF>TmM-SOY-H{xH9_dJ|7#;1tP*a&@Ks&aO1bps;}}Pqojxurt5#S>RQLWI z@jnBlL>SImb#MO&=l(n+aw~}DGU-qKR=gkQ|J=#|a9=qEAx1_iWXU)ZZZhU|K8zqj zrgWUVdRI<-*n3aYe$-$GU}G1Vk?b65X6pZNXw04qKW(;_hAPS`5vmJ;J1Mi^ZuCpVWRteyZpiNDkD9H&0>aqZ*BVm-h=>ms^5^ONmA>JFhf9o$#>Vh zXB*O^GeV4hbxQ!1ub|#Th~vO*bGc6sy+%X#LQplG*>KI}&?ed8 z&%Yi8t&Ub$ciJJIL_qBCf4&zE7Nj)BSbiM;s{;&Me;mQDR*iW8k&HB;w`JP3Ju|G; zV%jA~K~T)AtyZiD$HbVb25GEznN~x8(a)}$vhrHD_49j>fyau0fiX2@5Okt?tyf%U zArdeSXvv=5lU3Sfg_m!@5kdeLEbG;AxTsDrwPW~^Kv1n%O(Cl@E%%`<8iB^7P!qF&nL!vW;(W?-pvpWvNwZncRQ36z%=*gP0LQYjvMp71Dck=g$uZ(PC%x z6^N(D8Wxq!sdKD@nUQ#S)`Eu8awi(|yuz zgk&VAT*!xn|RKTKG`ZWb2S?K|sOT*3OjZH4hhYkqSa- zh&w;a=o%5x?GqbwvcrhSYi`ZC5VFR!A#_3Qy5)sLI-0o*pf}I4P4NrT#%w$cj@X6=LI-p&*3*Q8++|aL z74tNsX%X!aOGG{9GZoU?aOw@#c^cwXuN8H6DQaFW1Y67pceyp_pU*t&>AwW;in>?L z#LbZRX1{94*^gtS*jGogirm+wBY6RuN!{UB^CCina1pV^{e1f8W~2CdI_9V{Lt z*d?Z0+HXGtfSR$?rHA($iZR+DoZJBFfRY;Wv#i-cP)oWI-~<^ZK`(M zl~vp07YFE#k-56a#C_X`ClIr*5B&XKwcUwz>03Gw>L$zYBR!dt9UpM{0HAAaCn(?P zV*Ms3u>bto+qE^bYwtYP3R|BTH5m7K$U~O5z{;TvYwaE2S~*xTmyD?QrU~2{zsYTG z_B9)S66S&iMOMl(%ZnuTvkc35;W5D|j@i3B21ib52@TKS(@Z4xDmim8_%%VEE;gOQ ziXYZrqk?asU%~!NOuTupA3y-M?4{8vG7O30`H5 zF5u`y-G?XW;5Ert-X0Vp6rs!<{$kX-EbPeT>-SWj{t_oMTz z_5%cQlu4}oOTV=TlaO{=xV@@3^Y)#gwHC>mB5Q`Db&D>C?~@6nk?3GQQXMKT6cSBh z{kr6TANjY5Ho(cGJ&~k%IFzfQiY(^cRrEJ>{XzB^_5gG3)E@~CaL}e`-rOtL)Jh=U zzIKiw9wY8D~czep{|_&`iU2eQC+9|m;<|+OV|6zd}|6HaU5W;xy;Tc z$z$ETn|M*5&6=~?*?w{{>;XBfL8eXk=vT4sdBv{XyY^!wvF>czPJ`yoofpX|w0F3< zd!ySvo7!(q7R%VG73l_EA-7M*-r|1UKOy*@3r&z#n8`iC>A5ZO-|eN*WR3^hd}0x5 z5*|FNy8@XfeDT&DjLD6w8kV)n$aZ*uf~TywO{QupkHx$$pMo*N*sz4md@xbu(ypun zp5N*x581;_ZO$|&@SAksbCb22E;5igY?fPsJ#2;ig(pVw$Z$(Pgm}$?+I>#C+B0_XzSWAu)>`cnErarXa`7imB0G(>B7&ur z`ES&=fb3>gKaIgTKQ;Bv?(bG8^YrjPVLj->R9prbt(9+-aJm*@_gl`7LuR8Ps(`AhJS z?<7Xea}NB>NN|5$6zrrl{koVn`ityEf@r75`EJ748qOiiA*@#;FA(`VbE30zo$^y$ zB4-5!3MM0EQBb8I=W}I)#VFR@`wGQRS)aH;4LP35puN>`DPDUs#s`I8(huw95IwOM z`$pGlbvbQOQG3z7b4JxM;^Q8h{&NCi5rc$QJTfVe-?C&$F-+`Y$Ls>b`IIlu@{P*; zzCdMo4;zT^O0joV9S{^;ujksA({nHTbJkX;?~FvPQ(R@GSYusJhUJ8N^b%*=3Y9JE z#!cCnJjU~D*RI70zqVRmbBp$K2Y*g*NC+)Ry|~YcgGjVCe-<`V5 zRpV`cADwGZNsiT$jsTZ=-DS^fM4Db@$!>3^hskGnnMHL{89JK<<;Pcd2Yd0Kh>QD0 z+k-h`aQ$I(uE4s6)d<6;pd0Dyw95zn6b5k`qedws&?+4UxNuBq-Dwiz@xIcvli#8y zX*r9p6TTjdsdxx-#(3uwTb}H;sdl;MCUi!0g+#<3XDlD+_150sCkk;j#~PZ_!Mnrn z+B>a35@;UWpOT=-z2Wt`9%6bL&U!x)8sTpZz5HQGfU(e}#^Q2F8F~<>s6U}4@9bzl z!>Pk*nf}RRn~-B>e8WY2tYmd9>}~zwaJ}TpMqIx`gF@wsrGujZ_40FF9Jnf*Qj?)H z+I6=ZX%oUge^;M&^_zQMg7Dmxq;7N%#f+PcZVjwlpQ}5c6>KQ5@cGsKOq7uldpnmF zR@9Qzc)W8Uwwc1QFB6liqp4$ix#m2$bwFp&ajg9JhSroDMc3gTMsSDVjrExHLu%f^ z-OvgH0acfY*E(apL6d=8Xk7PoGk%XCR_@87AzFpJe*91{wO^N7NUKiscaM-4a+Eo@ z?Ne^$)(6e=r%z_g<5HvbPwv?g-M~7^@^l|LNcou8d$PC2*Jk?mq{FiJXVA{T^V5@? zWfoI3-RdRb&r-A+6@|_UcdK7k_BIZn5YI<8h>>Z{p&!@{pMzb-V~vIj4nizc7|IsB=E3>~yPC`{iUYAy63PNeS*d~r)LtDppZ)5Ky#GRo z{%=j9nHfTRkA@(}tgXr~9pyPQPze&eBukd=ium~SBFmm*WiVaLz5j?nv$~}abDceCka?ObI=ZRb2*gkoU^xXpC3`~Q~r9+ zPI0fh!}iSO#&U+&S#Hd5=L(6;UlXJl4IN#xe#^i%vg-cCzHGI6^qgI=#R~<^0Qc#+ zVemkvX@q`|_esx36)_zgNhuROgB!MP^v^h~m+!VSU>$j*z((2STUgca-Jsn0Rh|E_rz+s%N9N;qDEz@M>XS~7gTKzt9-f;U9xpvj zAnm@PuJ-JAPH+947YW=i@!t!bzDV5=ph4v7o*z(VB}b8$#5(R^S$PyTQMafm>m4w!l6H%y(9zP0 zBBvwdq6JpdPRP#g&Zv`K9YhBH!U9T98{_^>x)bu3^&wShp$dTO5K#CKyI{LHJrTl9 zU;e#M0UkCsbD2(}a-cYOrI^=KwDzv4!p>5_-MyG6UG3w(u?mX}+u=}ex!wBM(8S0_ zy-gB3)5&ojWMD^{nxsF%%UtE&uuC^p6B9~HUxYt@a8EUW^K0B)@X^NTllAEN|)R>M-6lH=db8C+0cH zbHcvWpt>C&KkdNXtCi_IY>dkXjU)cT&Ehbx#00dYvrIg}c7PW_@}Uq4)Dfnt%?ObP_45!u5DG;lL(8@9 ze)l;z2ArIP36`$xRBu$tXhX^LF1y#jk+GPN+CD%d?L3RIN_(HLTxV;jah!3Vft8y? zYb`lpx8G{1cGAC&cnkIrWqSGedmv%77t&v$2nBsqdFQEFY~8yzL&~aAtHiaE;7bme zrHjy1-1|Iw4g!+yO86bVjf%RfY_A6yW>u%;T`3*Dr10(0VON8H*SDcAppn80@NZ`U9`C;tPJCh-d-SRP$V{t3UHe>@EEMU332e z9TY;)cmbFCDfCdOn0|9p=J54EUWlf3v)#_rh4vRMzB0e`kvhk&X*UxruKtm^vN+@o za`_;M%7S(e5EDRc1$vEL8&%I5ghBlN#fWng~Afi9)Uy6 zYwxJR=NtBm?z_e7GlyFfSrO0sRx1-y){&$?vSQl+CdD3;2M->kTb}&(3J3ku$GSEU ztFg9mjcCr6*yOuHz>zheMKbE@(Zw41A@T7n09h274A4WgaS3_;<>mF{)_CU?#!85t zyQv`%9vNwzmnMFq39U$%j$yK!1H6Wl{My>CfEKsua2JnQ!YvF~$|Yugh>Rr2V{boD z=3R1Jb*A_^QlvLd|8eu`W!|ZF~W*$be(|21{=f z&-Prg{%SZL`&p~yd_DbDE{-{mPNJyDI#;c9 zG&uf?!TqS<5H~Y=_dY&j&pcT~Pk*w3hSc(?{MfGlWb%{uWn6mAhdOTh2c;GRBAf-A zxCV$}Svl@&D?<(|&rfA$08A*?p5W1Sqfe_tmJib#=ZY*Pc$}+QR4vnoFnPe1Xbrdu zPvgaJbk51@%X3_b?w$9`Kcss@fk^q%6+6D``Ch9~UnUPiVhffB#y@C|;U%OKaNc7x z=e&P`oOlg)O%oANGs@X$-hEj&0#waI=!;x01R%EoX zG9`z$?e=?#tW3rUoRr9`oOQEiH*nwRRT=rJ)ZMr{>*HiS*-5M@OP@UWGV>cR#x1I4 zp|ix4od6~*Q7YFhktom@OZ8eFN{k>O*9oV^I73y4_(vM#)8#2VN91XiWH46JXZx>U z@jg7O5$?;mv@vooS5CK{lQQtu%rqKi|3MTg{hjCLx;jD`1`(9Q(~HN5gxtn44GgR` zm(`FYoj-upKX17*{f~4lBefdcvK$?kHH}hbdn=*ZA9N8qtO!Mi&Z6dB{f(RigWVP` z>!$kzGG~_qPHn%3-B6iLzmJ3Quw0$>i`hhh`f#2)3*=OW$Bmai;(inxVTqy8-~D^d z_Uk*}kwWjYUqmmv*YdxjUflNba6_e00CpoDd)UZmF>=ODWw>v@&wSFdd80#lg^pfo zH{3kc+&3@TYne2649e?9iw!&Ntb{E0@>?w;xR|vV%bTDvoNvRU$4I=!DPy2}N~DA#E5DV7u21K18hj4ku`VIJf5FJ&}*I z$~QSGUGm2V^lqn*_EoJbDINT?s$!j2vXF^aC&zQ-H{B<`sxS-pT=7pEIxfsC26g-&tx~_9cfHo&SUelvfbf+_D zx(#7zs(x;6y_xIrB#hm48YFEq5M;XS;sI)Z3 zF$r$UxR?cL*Kg)^7QQ<__@cM^4!66=lF#VxF=3de+hGH&*9&oY!Wh+l}LE z%?-Q6v$RG{4d{pL2M3e7{qp}^j|K^y>|Tb4iFE?%ct<*K8ZitPYR)V7rZ3J%x>m}A z$h5#QF!bL0WTSN-N)zg*NB03k;o~@ti<4k>?u8iB4yaz}UbDk1&;fctEGEnp`VNNJ zAs=o)rPUd+PG>aKmsoFz{UZVe37-Hu=g(<5HBs4X(_5+C)t5sDKD_ec>Dpv3mu}k` z?J0MJ`L)auH$d0qk`n7&`0#30Ww1^Z-;>rR)CntmNoIX)OzJcEd?Z2SB|dz8kV|veIxT?K}&1N=!de|MX0IiREQhpb(u0E4dcky9dRw$(~z;eKp2%5R+;%f0df=T}kw zV_ezU93kHgA^+^)kT`>Gib+e1Bdty^POVLjal`u2Da+DPBk$R=RLbk+<(I4|DX%e~ z$V~mLKrjBj%&Wb+|9vpXLPG38Ofi$YpnLSrdsoh1c8iKlazYv`HMTj$>$qY3tGxx? zH#Ct55#rmmua4Aq3l%+|S+k63)Zb33+qM8*^cL9^VkYI={nP<3)O2~ez8*R%YrmH{ z3oG@R>gvdt9CG1FAN#uF28|HF5sgV*j~$ z8~89*2C+G&d$h|>2v!k`%&CA^PK`ceU01dqm)qImu#AdA_%HMZx9i9U!>-94=yp#= zJ2dm?EpGmeTX?o6FYMXoe>7goOT1|0Z10j3s=8TJr>9xs-ga}~Y*a0ZWK$C_E{m~` zHYJ&koqbFiadS3XW4v|cfyyMW#r?hqR(XlFBn<@k_*qF*mp9Cda>?Wuy3fTH)H}0< zNN-@dCHbMTV|%dxu+&nKa-ypwa*?->aw0co6}XnNm;M8#<_P;hEN}9 zmynRC-eh{NQ$4(&{9)tX$@@OGI!Sw+nJY5uF~BcSdA(5aWVEQV*@xV7tKcXvh@bLg~G4<*2sHk&Jw|gv|OBVru!j`$~)br+`mUX-`I-4 zBEo3FakiyZ5TgWgM*XJB6ol*854DP0_DyisKinp-< ze62K@@+qQ-&kWn!9~D_2ND<{g!S{z_m;VzI48V$m2_5Dmc)9`*kmdj?*ebD)K|6nth-!WyGZ8o zV{ff}$GC_HI6^_+oT07QTL>O;p+aqRl57t zwb~i%dSI6SIuO>O*;C`3Gxsz`*=`yz2|jMS&6wiiP`?8arB&0>N}G((u#V}olQ8mX zgdNse#E$2qcb?QZdu7_}p{vc<8?0(oJtsEC_4`>t>BAY((&uJ=vR^_%?ylx?p5J&# z)j60f;-&>?jUS4an~sy>-XG}hpF=NE5$yzT*C!yRJ&#huV76)q{eU@bnOALKH^I84 z(on5VKOTF>QnOxN@o+7)WSC5uv+?7pP}bD#G=0acr9#1(>xk z6nRQbf6b_wxNLARV%TBpkd1ztzI$z{xXpv2jyF6vRpNty>Qs6dH$En{Q6n>#%ch#Fd4$ zm(6ItLn;|UJt-$H7IqK2G8*kS#_m=t<<7XTrYn|PM3%nOV7=xmqpx2vH$N|?&R*2*NP9b_>!RuE;9*v)ywQDF#7?jOgUtzlMEj~*<-Yv+MZMRZ;rt_q zuhKQBn16q0nJ-cC79tz!xbzYfOVPBd_6z&B4j+&Wh1O6^$WPWb9ymi-WlsQT5_>EotV@#jm}Z1u^|5M}Q!^6Ft(g+9Q* z*f)>2N1f=OM$TM8hS$qY4)|Rz)_e784p=2*EVo*Lt{31UUw?KQcbp;vOY}~nLY3rr z<}2(Wqp_!M=xfkf7cXCs3KWVjto0WncQ7IFE<PbZKD;0m&UJSMh0mk?g2-Z{^z&g2%fDcflFDMJ$+NscJ81<Z=HCDc%@IB94#T}Wt^F7(D@P>3 z6J<*8^O~g6aNoUqC)p$}+pXv<6NsFqxoFue4_Ft(Wn;=;wYYbG9$7*1(Yk1D6f@M1 z-smRP8PhEx-pCAr(STc^KRPA>y)p>&4olosjJ^5Bm3T zkV&?KRo>M5v{`MX6Ah_C=Yh+J7g=~2<1BVwXwXTzSOUskUOOf%nyZdzC+xpJ*szod zlG`R@Z~TA7y=7R_-~TualT;B91px_>lo-+>FqK9^L1~6TODJ7~i7iM-C^b|Bl(VTP_A621vuJ0jp$4w_&DcRi&f)4f3-Zfj_}g;y-&}Do_6X zHS%RN)$vjEzH1~Shno_XZIa_GAu9}~E}<{_)6cGeTjjq|y5N4Y;dqaX33c9kBE|%& z2+GzcQgeXYgZGmUGKwf}y8D9cLPA>L^bQ`G>sS!R6SJ}vE>9eCyL2fG+`rRgqOoro zRAB_0c#Xe##WyDYFtgXq6ZB4eZjhJ*m&!aOD~A>jA9jXo>MMYI#ppah^=q-Tl2unQ zqv#j;e#1f6PSf9@9NOsZyWC+=N-P3BICfw^I5n%V!nE8tNgE{uP6jL+KM$@ei(6Pk zFMG|{7xr^UzqJDCuhDqhjI%4Co`W%%oXtmPQR#r-`r)&ND0U|YoeQ54r_WK-c0tJ= zno^J?!1hgAfZNO}8v@TjB~{#Jdu9Z(vnfaE&QqmnQz=z{B274Sepvo&D-P)<(gH&pEwn2J{4f3eD6Dnh`+`F+Hn%wX+D> zGWavnO@L?g*$jV>v!!Ke(kEz!b+mUqQNsJ%g805)!>Ixp(MoVrf@LoLcwQ6^qk_*? zt7&UXLJxI8sk^49m%xQneE4fO6%~ke@~pGW;Ae^^-lbkB!GPZKM_}fpDQL6*;J_)9G4( zI9I+-Eb_aGH3_j|);ed`$=(V%cb-4LA2 z1+LQiQM8DQzFi59`?M@5w>)Ltlz{Z@xixt}s0aosnj9@BdjZ*kz@2tVqOABjfg7-_ zl}V%7mq-ksZW8_=lHf@=sOqaRPpC$Lv+($2xY0%hg$_|S5MURy2JVclW*@A6NpVwT|Df z<#fv~ELrRk>yOl9^R`VJAn++Vi0btB($acL`HqK^GkpBj_@p@2*R!XGp>-&)LO9_` zd}?PCW#p`k7UhJ zjCqbS#+7NWP&7?x5F>RF9O(1007Ocv2i;QqSV~go6B{7+gx82*P%g5PO|I;|Q>41N z@}7b=l@8L-b-^nw?+KWm(Z5u(l8#X-hHSpnNomc55bXC}i2_mXvJ5|$lyy>%uh-`fz6c=C#Xmu8&wAgK7tJtB9d1%DW*Qq;F_aH;UZCI5!N0cBUzsA7jzCz`fgQ#7?Sf-o`OT%m^Q}7tntxQKdPtATl=LUBrJv8-iDBz zAm$dH$>6DbNcW71tm`YI5FQyAJ4K%Q412%#Qq<=ImbEKzKTgxqm53`f)BCQD1=0cv z&Uq7Kow2}!zLbgBH!c|>hC$$}(uwciyC?nehH~Ng?+g*(PHt6D-Z&g>ml3+U>QQ-& zE&yNp1`3EcAYkYE>WlOik|YmdpimBSBk|zoewST`wBZc;oMDAa#qolQewpt55L54; zt^OSNuoDCL+`w`!3Cb%~MRjQxSDJ#=); zbF#+1vzC*84Y_I++0^3aPJQh5yFh#yg-nw=2#|wHI-$d~w6xaet;)7=Vy7X3)OAt& zKd7Vmk6x(LH&t!?=32Oc?TN?;$H=X#nV6ib;c+6tQ>My}RE!i?((b3q0xqn!w4nSX zV*b?zDRuKp;3oXwztwV(%EfNbio*V5cv?vAoIjT~)ToqH6K^VeoRk zWw|bREf^l1j<3jT&vEsJPV@Dj+umKks+_@c7$0+s>_1dNQ6?JXE^5oTCNj2b>sEv) zL<-L3(u&|q>Z)+0E(Hj;E0K9Nooe^luyv1d!O^r^;nK|o0{Adkm}g7ie|;D|Vk!ui zxb=-5pT;!orF5x>04I-_swqZ=Z)M%BnuYg``Bno0*7B1J4ki7vCh_LV0+`NJqYupZoC+a(J9)6zks1F9{WEF33h{Fx*JHjM^_3GysYyioZ% zl>`w=dmSZUy68Q0P53sR5Go|{(W@m>T?I;EOcIfzYVT)HY6m&&P{QLB7+m*s&wq6| zzsm5J2?yn)B`yU8_<-kvv|-^WbtNk_;5=ZAkKxcq%AXbufv7r~jals|brT;sIIC8~ zqA56O3)`(Il6mWas5Dr@Z; z6*wOpVikUmm7Vo7K>n*B>7|c00P4_T3KUDg>$mec=eNMHamD5)re}tET7=KJeD=1pWmZX^*@5eD&4S zN&EM^%(laoO})~ky+L`+?;ouzSLW1XH;egnSRk#4)k_s?7&+(hv*)#=XszISEQQ~r zcV?mR2m&%+K6n>kQZ7BS-EY zgyahe|1N9JA0U?*GKKJ<5Kj|ly7?3|H_l0<8FNA9oKbyFlOG4#)x7XhMv_qV?x@D_ zNB8QOQ1`JuQ=!s{Q6fF_uRBm6fsiOP#Y_F%T1Kj#kdPgeZhzu+Uf=eTdCdoDEpxvv z8!HW$3pX_f;=S>H(Fpx9@pYr_QmZHdGhyRM=nJSL4sBSBnYc9~kGj7(@@4)I#8?7| z6`PK}0^a5~g&ChRTgR&QKa9ky=-egZOY`320WXaSu=pz+P*hfu{a9Of0p#%TKXp8^ z+FOqcPpUV5kumQ6BpHEwE9v%39uEZZ2`3zjzQ)1_F4efE5gJ zXlFWG6Lygn!7S8S^U5!5M3SJJDVTJ|d+{=5CiB$>^lG`J#pfJSR(Js2<}0Uo=DwD9 zssZJ4rlp$TY14SuM=H&{B@0^u0fIWS=c%7$Z0UneD`Z+W>7fuvq_i!j7o$*kM5IsN z_)>BHEJ(eSnGC(>8Fcbm{h+(@txUQ#&K~$r0Ec$|xgpc*7L%h?qulx?qFOz!V@BoR z;;UVFc)Wn6L@hm-*;?ZK{TV0&OVMi32`bZh9wbE}BigX8Yb9Bw)_`Wt4!L?)v3O}9 zdE|wPgXmA6g$SNuQ00oRH^qAO9XtfUPtin{zbzd>%4UXwI7(|!=&zv2jR5WRdZAm6 zTOzoLzO9l!BRc;>gC@boBdh2ss5ALrzRYmHASx;fVGd{pJuupodG>=!@gCnt zB6}T*6c4eeK3fnfyVM{xd8cRkpt}kXmNQ4b*3?8+Z+O0*>d9`8?RFdzF8&d1%qjJJ z<(FJyqBIy;C#@ z$f{Y5Qago!dUCjcNWdA|rn-~V@@w}Or$n1#m(g`Lzdz`@*ABFq#+%$P*o6Nxh0pwa zFX#%95IW21IVyN({j*f|fo<7f&jK+~@{R?|JOZ}5`C0r8jd#(cg`~1)5Yz^vS+@twR zm1nQ}>|jJg;QWTCvwMY^gGodZH5h{1{T+Q#nDNlerw%m zi_t~C%Py`$oA%*;6^QH``yn=TN9*Gq$ol-|_-?mO2dnGpT`(sSR^LUxZ!{EwKcyZJ zWCAZv^vJI%*3PDuQ~C#>3`#^uCi-LYx5LK{Xv|_~EovIRg}D#-m_-W4^II99jSC#~ zKbC)Y17z`^rMO{bWhlC+5CZZTw`F{PGyeLT(%dQ2yQ%erIbnCJ&iUq;sBcD#5rhKSor-)=AbR*`hIkMaFCwAX;P^ErE)Io zp#1njQSVAbz!&ir1_%5kU?bwF;0GNvdb=WCl=;RpRl+i|f^3=Z^2xd8Pn~!J)RLxT zCvT%8o@la_DhAx0+Yf}xZ2s672|;l|$VcncMqOYP^O=*%L`Z)c>(-21%=D<=x4LoN z8#&?u@YV&~^1=Zw5ffA(P3GB zmV6=Kv?0Xc29MD=n%u*P$MoGwx!QYgf;W6B4+APAKNw~2#FPSBuowIy}d+=^|t zF{GKh09p^M%<^{GUk>C$RsB$-Tu?{x7e zm6=ET96Mj89-VT8s!=hKAEw6pplr0DP)oZb)08hpGA$JEUqxT8jg41Ps#dK;eoXV< zP-`jCRIyU{3yhTKU_br1_!d1u#{6Miq#Un--n?LpC}T>-v%i9mh5_EFuHhSRNMDb# zAYJo|TysW5)GfNKjZ{0vlJ+Jq2ZBitlT|H!YWn8ZGx`c?b1xA0krzy0RPM+y zv%IqwlV-jFirP8(MRS{;N9SAHTmG`sLDikSXWg}}<3M9C;eEKz)iqmYXSJ~E>wI8M%W@@L3YUq$==Q7Fjc_!;;e zvKLxz32km}NwVjmYnrvIZ`$MpVJU&N9{?42Ub-20?*iD-L5ePN_7bb1N)Z6Nux^RIFLRPh?V^) zpoC8KE3gew=-L0f^bYn)mBP2n(4I*9&Aszf+6B5I?M@=Z`pef(Qq6$VcoZ_n*EaIp9I!=I_2y&F6$pyom)fk9Anbomb5{TGp9Q zHcvMxnew>yc9891>kg4_lWsHpR{J7>9r2;Uljy3G!-H*O?sS2@CHitnXHQ*DOZlbE z%6dqukgi7UkzG%Wvyk*EgWCLxZ} zrO?p}&6jYtWp1%gorjCcRZb11RAW%oTQy-`3`!KNf)wWIVP#^fqch zJ1F$bKvsK%YAXb_`3;Pelw-OI!TolXU-hfa4ik1i0Y^UZ$1X=cC8eqpr74ySB%Nbg z)-`p^LW&xB7^vc_y6g*_Ql*ehZV{@K}qlocp0#QRlT1>b?3(}HGY zD+^1#QIXgkQsIH#-U$Lpz&rly^CD6N392f^Ppe5Ts`h!dpZf8@j&WQKtct9kRD#C_ zJq!#R6%6xD8vN7Hg9k3b1+d@^+G(rBXY&&#(kVu?ba!`{-)h#~yT3bIZHGr&b|xog z7qNMX>A4Qk{)NT-% zRt5u>mEjCY$u^!O?_h}$tZhOs7{}cr{u5Q(p@n}8QL$FPaLUf=)0PHH>#`ajWGWo232W*O@wz%Kms!XU;oPep!W(~ zybhlxnVPODr~Hx3cKbedHj2%CLW!UH%$jnu>CNr+mhjde0Es~gN5Py`eBYLv2qv>t z8M=%7i0|kLE%$c2oU$({rXWN)DCY8*iPo+l%wJ%EmeDRYviHY!ga~v3gTx$v-+Ljx za(*8no*nFWL)q%;c=S_@_vJ^qA9`%IBXfU)Ak(RkA<|kXu^wv}c5YT%2b6cj^haLd=T>1FmTgWzo zjz{OjSp1?KNS4c&3|pXk3r31G&=PU0`Z&UCNPmX0p7P8*>-GYZL+N3?8X&_lVykg- z@7G~fC{Q?tUTC$rNg#ng!~Ssh4tbl^^11KLTzkKkaum$E<5JF1A+6XdbzNR8O;T4h zU-cTN13Ux`6#TiFiQl2m9)h;(9ZYwgXxPE(yl?od=!LCWPH|*;eLmcbk7+%QZIGr2 z=M}!u_3&`;I1&_=*|cUjzqQ{C#O*a0$^VJu&2b=eTIQeKAAG_}Z@L|WqM73(t}j{! z4QMrGqq^Eb4ciiG0ahAl??d_>ST75N=ww!aaFXpKeJ(1$M`Fi!NnwH>(y&rXj;RY} zWGj1UdlZV*NFWY_US^B*1PeRIjBII{cUK#qmi&2xeP{T;o?sS$5BgU^7Pie3$tli| zzJk{-Q!PJ>oO|6TxIsFvdfJMdV9|EyxcNkrf+yxuji>{GWmN}AK)uvL5s2#Gqz=37 zMKusZ_XlWG&#@8VH!@ogwI#u35$C~u_m=lcn%*$mtR6sSZKY8Nzt|p%7z)hDkRJ)r zHeqb_t9&~Umjb>37v%P|+r!d-Ss*ifG_#hKqJN7uRWT$z?iXlH4^K+YB{?)RD9}kY zt5>tp(>o})OW*5WM=X0V9@-mqJtWNfs7ZjvAE^9M`|&{)1s2ASzqyfFb0s3f!?eGD zuAnG^NRQVvF%anUmnox}qbCN(7t}sWU?cDaMs&bhW&zA{Ku+8R(E0Nk%MGl{`*(+J5+(|c19w;Z$ z!53*wa6K|KUdqaHLNW2gZLbM`PdNyR9A{%1gUq&B8bRM=_&dYjccM9=oNP9jY<{I= z0<4M=6~SoIJLl4%XGtHZhD5k2d^OT=n#(Tfwz8kxcln zYlpHc)#>ly={Vim(UvaJQGD6()%9y!@AuDj=H9{H>u`V)>5X8EcD#z+q^2NA0!~Mj z&rBfGAJRY+*$Fvy@4g8ekfTr3;W=?}!hm>k=&Lnlc1ssIgDD3`@Ss&bW%i}PUVFKc ztK#DF*DOvnaSeWxFz-B&p8IqVmHF#Nl1BW}hw}r7#y%!7TMjj<7}b4+p9eCMPTV#m zQ2cMs%!vZSh**XZWc4y%mey*v8EqUP101vdKV zNQthhUS3DKP0BN|oTAR$)1k?f7%9103EL))3;AYpHpFf<;fb-lJ*F9KpUVpslw33YLG*|X5%FCT6GIoM6f7vT}L7Iuy z^5lhWUGv{RY{a*0rG-0tMF4J2y2POFprv)o69N@7{Z0{2m+M@)nfa zd;G8gcaqE?D0jzWw+$I6fHBA0&ncv+$sf+n`62~lI%>#g%T7)9wM=!|zYw_Cabb3h z6{{(XVq6_S$1af`FkSTQRb+I9 zYW2>NvRk1Oen8$IRfrJX`PSBGsL*Ec1vsyv+tC%b5a!arMY3eKexj`kD|W9^-q`oc zX}R?mRvC@)e?>hAJOm*IKCm1Jqhwzb!Vqjn4RYq(!GQn`a-40^+1m4t#9$C#@|d$d zjRTb`bz{2$+R>y0DUeKHNd6#CAWT&)>7`1n!JQ8uIIoU@pq$x8+NqxMk2dHVIOt$) zeB)s17?%|Fwz%(%1fDR!MpmHCsNN^6VW4`1cu;JTd8)Y|t*(d`I^3IENdnG`XT8f7 zw5oa~>9~oMan2QJ)Z%AnuEzZ{_}IXnBZJyLf};dN6TCEt`Svi$Y2%vS;`-=zxo&~p z^jnIq7eY(Yd!o5Ow-cpZTN}9wgOq7QemuM*DL<^dIsZnQiIMf(`;LWF(VKUN%W$*j z(A|5_hO7dyB{qY`nC30X7zzai{_(=oARq4K!WxtcXHQhhY4ekhBqUJv^cB$5R||GNe%9^6di0&_0%q9N*Mo zgGqr&PnMd7+dW+oG?Sk9T*k+ZWZa&eOx;h8o_0&yk(J#`fs_;Uve2X&DWt*2v`(9XkT~*ONcctw!q2(hmIkF~kEoQE&7}==N__%0`8$ z==#cQ*+-^&PN=v!Mx87z&^YUm!I(}Ko!gNqp<(lpI_Qb}9n4F&s=@t=h}AUU%PuIxxrNQYzI_F?;puSHF%o^c@mVfON)_WCol| zN7GX-mqKtEQN(xdoaf}_>{4f>xfYo~11i?hX;@2?asO0qfor92--PLV+SK4H3a+M3 z;5gEf<^CQC^mK{gBqG(mV)8lJ_=323rd=U>#SDK|&T+jYl6&_wbgubQW<+tGRbim| zVJEDh8n6~gwe1lB!#8o{nDB7kMA?~6f^h&D2QIuDLDFbXkSlO{1%m?G><1Cc)IefF%zzkRBYUQRqTicr?XV(78;H3ll;e4 z4v>Yti8L)X5g(7+geAd)m}+V=$7`hJ2Xof8i0xsE-kE|3QM*S=ob>yMMREggf|fHAVG5f_#4gLD6;>X06$G*ZU80{*Lm%sWybmG9zK2EUu>IfZH({7l2QZke8H^p-hM^b2)P%xzy>4K?bM73cl$f5& zsLvJKcLE;sq|#Zp0%FlMR$9y}>Ky~+B5M<=H~RJA#j|b`GHArFvSQ0-iIW4q`b^7H zBgfq{tzmEmLC^8=nFlG`4h~o)+#x?_5G}J`KKbvkOOaw~L3f86I&UB`2+T!Gg^lOM zmRKRvk?<(9urEA|l1OYojAYHbVEHgoXgO~&`5}7mLElvUn#5M^XN&f|b@P@1WE$uL zLRlHN?u;!hryB~Ej@E-~GX1!jE6`fY=|+6BR7|{n>l4g2p5ORBn>~x$k=VbU{Gq^$ zUl~bO86;RzV(1ZrweRE{q@=HM7aOq{=SOz!J>zdW(EW%9i)AxjYQ0$l-Lqyd5S?*a zI+Y7=qP{WW5Vd7T%nNOr(o4s}ZjZo+qhx18JR3JOo(_-HEXq1W)s8mMQSQ%)$=R{s ze@ivy9D5g3`91p%rOK4eS1@Zy(E0Rd2S}ItWes>vsfU#$70rq)u~asOy224Q6ZVbL zDh-%M9B9oy5NA$0_dtFv+yxY4gN`we&>AwG6sql*=U;vwh{&;lBMepo9(<+Qo-y+< zg&X$h!cb)WZ7KfR*BSLiIFuGz{-*<9=ahFG)-h@j^~3vB0qM?`{4ZIXV*?Vlu^_-m zxg}wjg@oPeD4Itm8@aEn zr#g$Nyq6-$`b}5*N^jpga$NdS#)9=fyr4aLXy#~<4*GDxvnNg@6*{9ljVeli->&H@ z&aE3@>nXVmIPtnl4;P| zs&Ltwf#hM83LQLZGvcSYZJTNSOFE!C;t-y`Tgid3s}*hO`i+Zx(39s!E9c6}x?jd! zD7AZVoc&{x`I zv-oeJA0Nb7o_&uw+W9`=A07?jM3ti)_Qz=vif!u2l76^$-C_t!{k45k^4rh@w$Q9G z>(=JHrODw1=f=>!0P?t})e?^1+eF=FTqZ`|CeC(!7dsIA#CfX-iZITuC6m~X26%nc z+M%bnIcO#XYSigw$~1)FWqLo1@+HdUiac}ce3#)`jjxTx+$Ua?x=8~bwYcAOk1mE+ zcdPA?X7|V-jeHeIv^-{RY+FEv>>$~KMqdr-cx3tqPFoe|2HQ2ayqz^GYI+-Wxj7Hp zYgH#)KcT3sekm%$BJRVjy&wjkRVCv&(Dw6HEqlef;tNw>;*Xm%0><4}v2DDcPD#1`z+6P(2L)qa$KrNI+^bGA=LxRM`+qN33@kLtJn(*s zEhQ9L|HpiQKUeX6Lw=rNfm?k;NLv~w2s*Ch^2he#M1}oRh6Q?~XXcCOil7dG2jHHD zi3wH5Te{IRZ+hE1RrOvgTXuekf`@en%l|+W>0q7Q3Vd$8U(50A5zDO{uR7aeyE=Ey zes@0lFV=wUN!x$2C+E#OF>AkjeODfnjg*?LG1xdScOOReDSvqN+5*=jp0m2tX)({7 z8PmX>XV2504y*8{40;%Hmii2@?iVvVw?=Twj)iL|ICxZgCQ_PQ(oJ-^geExL00-J%3PiqFfs z^qZk4q{bw-Z4B%xG69l$WZ?5oOwPY>e;|wK2N%KLPpi3#&VLtM9%%Xnl#iREO=Tv=k*nC9C15D!E6osIi zX4i%zQlN^yr&s->j)k96Rbf_^w>o=yjn>U-SXx!Q zaI~zz&Bi!4%xt(@LT?%rnv6uku`)>+=ufv}8o#ZtRZc&Cz0z+Oi$+t+`&?NcN+hA6 zVTbt>5kq!8#F`>U3&rC4E2L!!pTlZMxC`uq8x`h%buHh6eu;kc2seo>p6vK6UN}e# zU+>MWBHx_^B{c0m%c-a6m?0C8Nu@3uzvG%6S?#9Fo< zF#pNDf@PulsbB$IR+^Dn-eu{UZ}B~!It)2EsPEYY4gtV*%_|u4gC7?wGwA~hQqJ_P1NTeDrTnWJfjz=I6>ACzr_*#A91a^^BLn$u6wk< z``nLnnw6XH>a3(MHWiwUjee}%=j(X0e5vEJgoUY7HNsFutCEs1?GAxlv;hG9xh|Xp z=#bAy@8AD{39mz~ueye^J@YG3!}@&1_{~-qm@&3?!r3Mz;5{L2jlG%0KO2gsse`NU zb^3_u{|xd)|5$PN%QaZO_c!JL%xS@JOwgNXU3&}9SZSnOsrylKHfB^{_Izt;>w4W% zLnx)#{tFH`Sl@c@;O7Nuh2z>jv!%EwjN{F8<^QmAb5P`O3e~A7tF*=N%^4R>hBita zp$(p`^@U3DM<5n7T`oU^LV5OvCxC-JTY>i^-ys6xyZLhpI@j{V`}*ius6n}<{RMbg z@o!cl0@420n@!ZL4>6AI1vmTYWoJq<|9VsS3@`Fwz@7x!B^?J}`G4AAfBNn@Vai?K z?Pj65ISJ=7xg{^@xxL7<3!wmBfb7H9r?~e*cGo1wjuAh?A{o_@e`dLSIHi0|IpRaQ zOWK!`mXA=6J>I|P|Eb~t@()Tn9LPy|&-2Z_g?@{$I_${&;UyD&Z58(5Tdb2NPlK`J z$E)S{LMGhUJg=|6(|c?Dp$q$`iWUZb%w4BrZWy*XUP^^*?3*i=%EBqk*THeeMF-cF z1I?B~k71kgPh$pBuX$hw=YxAmJ|kxeq-4J&n)k$eeFj= zx-v(LoKf`>yPIp>N!tjEp zv@*umYG22TjJo{#4MuTy_KQEXYA~7BtR_?Qp8Cv=W{QQ>3 zyH-97Sb)>1q{mTon6-KAdGIj$`F2JoTkuQwbi0zupdd(oCp@U<6*BG zMqdLbx>Zkz+r>;@W8S9Ru7{%xKw?%&*?qR;G(iU9STS!DIy6`qGKOocGz&b>$A_JG z8NhP#V4Syu`eG(u7)I%p--+*VZA_*i=DfeEpRjP=L@e*<>jMk-yb{-Sdg2Q|^V#1n z?~3#?s#ROOjuQ+U&+lIM$&N1+OiPVx8TIfzeG%8ue191GND*O#Tkd6f5g7RN)7p6O ziH_vt6T>7MO09gM3i*6VEk5niHR>n!5Hx=nGvqMZ-rPq0kDWNCh%`#4f1rB@=j0WR z`M%h-dfqdB(x%uTDiZ8A+Rq-u=3Vz%DOvc53D1M0!d5z43qS~RH6)`dJ=utU&a=CxyHuC^E#PDKW&_>wTm<;Y!aG&)_YugI5;qH`8@CX zp4cgoV3cz>_6c6tWQ&sw#5>;eU6*}rhkbHxsV|wTxr>V!H}|0Kg}A%o3JMI)&d#S5 z6xbGQU5foRju}|e3?!rGoy1?Ae1Q5s};4xcgzu4jC7`!Ld9waCEK4ZsUy+oI0X`_7CNI4j*$!&Hu9d_d7h< zMJt##Dd)2Hx9J=I>z4^~fRD>%dEC2R-a|`?A#z7Ux@P$bpAjJ0Q!2n9$Yrob^)9q* z8QI;d*MHWmMLqMpWGiQpKdPk%0X6LW>#`E^hVKxQ0t7n#2bN=;0Nk@on%SEJBZUr3 z6-`zMu{D4HCpd1TXC7CYo}MO{+-3~8Y+;{CDlluz zPaZm!tQCBHwx?9%3BG6sHizYs?#f#{`S%=6IsUTC#z`$9|6$LTEMf(naQ%hV@%^RO zfJ$cn%s{V~=6Y>|3|{_(sMP~Ux8TKP0;Id=)aReP6v*jM&sRx`UY2ry2VW&kuP5)1-jFiZApgpz{V3FLZD4C(rdDrg8U6^l;_^oq^5i67^xND|}55luCJ@~SuysS)qot(|c zQu~y5u0xr=E0c<(Kq;+Ca%B3K<|%LcNGxtW`i0Ggv#J~fzAZoBxY@Je1+=a3E#pH^ z;wHbYI?NYk>Fzt_{k^yd7cYBd>ibjK*&soE2Y60@gNh%m0(zV}s?Y8Bw^@bNxtjL- zBR&tlsgESZI6gTec)5hO`n-aN`Dgew$=o{ZgxH}-9_2zd0oiiyl3S)F+$MB9Boi7L zkLWnk%d}T9@4O?vFVuQF_@uFf5)ffB` z!pjU@3w63wBw$zZdhPXUt0|g4;%!irgx?X|MwGtHryO_8FBa|Tg-hhjF;k=I!_8*{p? z0OyK%YJCyui)N<5AM^=Qj1<8eY@58rh&{!1$0D5K8M=sxV7^UEx2I9vejD}zsB5j5 z(K=y${nyVU=)nG=FXX`U?N|4H_{IrjDyg|;8{~hOzm!;@IwSW@s&-)B)9~qV12ZWS z6j`yUWB(Wo01X=fi114eLkP^?HZ*@f{C}VzK&1EtgYduS+@wIb8n9y*UcbrwUx-Kp zM7~iK`){k?p7_ENE`SY>1(Trf|1inOZ$K3E9lnxY__uM}H0l -Recon is the Web UI and analytics service for Ozone. It's an optional component, but strongly recommended as it can add additional visibility. +Recon serves as a management and monitoring console for Ozone. +It's an optional component, but it is strongly recommended to add it to the cluster +since Recon can help with troubleshooting the cluster at critical times. +Refer to [Recon Architecture]({{< ref "concept/Recon.md" >}}) for detailed architecture overview and +[Recon API]({{< ref "interface/ReconApi.md" >}}) documentation +for HTTP API reference. -Recon collects all the data from an Ozone cluster and **store** them in a SQL database for further analyses. - - 1. Ozone Manager data is downloaded in the background by an async process. A RocksDB snapshots are created on OM side periodically, and the incremental data is copied to Recon and processed. - 2. Datanodes can send Heartbeats not just to SCM but Recon. Recon can be a read-only listener of the Heartbeats and updates the local database based on the received information. - -Once Recon is configured, we are ready to start the service. +Recon is a service that brings its own HTTP web server and can be started by +the following command. {{< highlight bash >}} ozone --daemon start recon {{< /highlight >}} -## Notable configurations -key | default | description -----|---------|------------ -ozone.recon.http-address | 0.0.0.0:9888 | The address and the base port where the Recon web UI will listen on. -ozone.recon.address | 0.0.0.0:9891 | RPC address of the Recon. -ozone.recon.db.dir | none | Directory where the Recon Server stores its metadata. -ozone.recon.om.db.dir | none | Directory where the Recon Server stores its OM snapshot DB. -ozone.recon.om.snapshot.task.interval.delay | 10m | Interval in MINUTES by Recon to request OM DB Snapshot. + diff --git a/hadoop-hdds/docs/content/interface/ReconApi.md b/hadoop-hdds/docs/content/interface/ReconApi.md new file mode 100644 index 000000000000..dd033f39f0ca --- /dev/null +++ b/hadoop-hdds/docs/content/interface/ReconApi.md @@ -0,0 +1,511 @@ +--- +title: Recon API +weight: 4 +menu: + main: + parent: "Client Interfaces" +summary: Recon server supports HTTP endpoints to help troubleshoot and monitor Ozone cluster. +--- + + + +The Recon API v1 is a set of HTTP endpoints that help you understand the current +state of an Ozone cluster and to troubleshoot if needed. + +### HTTP Endpoints + +#### Containers + +* **/containers** + + **URL Structure** + ``` + GET /api/v1/containers + ``` + + **Parameters** + + * prevKey (optional) + + Only returns the containers with ID greater than the given prevKey. + Example: prevKey=1 + + * limit (optional) + + Only returns the limited number of results. The default limit is 1000. + + **Returns** + + Returns all the ContainerMetadata objects. + + ```json + { + "data": { + "totalCount": 3, + "containers": [ + { + "ContainerID": 1, + "NumberOfKeys": 834 + }, + { + "ContainerID": 2, + "NumberOfKeys": 833 + }, + { + "ContainerID": 3, + "NumberOfKeys": 833 + } + ] + } + } + ``` + +* **/containers/:id/keys** + + **URL Structure** + ``` + GET /api/v1/containers/:id/keys + ``` + + **Parameters** + + * prevKey (optional) + + Only returns the keys that are present after the given prevKey key prefix. + Example: prevKey=/vol1/bucket1/key1 + + * limit (optional) + + Only returns the limited number of results. The default limit is 1000. + + **Returns** + + Returns all the KeyMetadata objects for the given ContainerID. + + ```json + { + "totalCount":7, + "keys": [ + { + "Volume":"vol-1-73141", + "Bucket":"bucket-3-35816", + "Key":"key-0-43637", + "DataSize":1000, + "Versions":[0], + "Blocks": { + "0": [ + { + "containerID":1, + "localID":105232659753992201 + } + ] + }, + "CreationTime":"2020-11-18T18:09:17.722Z", + "ModificationTime":"2020-11-18T18:09:30.405Z" + }, + ... + ] + } + ``` + +* **/containers/missing** + + **URL Structure** + ``` + GET /api/v1/containers/missing + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns the MissingContainerMetadata objects for all the missing containers. + + ```json + { + "totalCount": 26, + "containers": [{ + "containerID": 1, + "missingSince": 1605731029145, + "keys": 7, + "pipelineID": "88646d32-a1aa-4e1a", + "replicas": [{ + "containerId": 1, + "datanodeHost": "localhost-1", + "firstReportTimestamp": 1605724047057, + "lastReportTimestamp": 1605731201301 + }, + ... + ] + }, + ... + ] + } + ``` + +* **/containers/:id/replicaHistory** + + **URL Structure** + ``` + GET /api/v1/containers/:id/replicaHistory + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns all the ContainerHistory objects for the given ContainerID. + + ```json + [ + { + "containerId": 1, + "datanodeHost": "localhost-1", + "firstReportTimestamp": 1605724047057, + "lastReportTimestamp": 1605730421294 + }, + ... + ] + ``` + +* **/containers/unhealthy** + + **URL Structure** + ``` + GET /api/v1/containers/unhealthy + ``` + + **Parameters** + + * batchNum (optional) + + The batch number (like "page number") of results to return. + Passing 1, will return records 1 to limit. 2 will return + limit + 1 to 2 * limit, etc. + + * limit (optional) + + Only returns the limited number of results. The default limit is 1000. + + **Returns** + + Returns the UnhealthyContainerMetadata objects for all the unhealthy containers. + + ```json + { + "missingCount": 2, + "underReplicatedCount": 0, + "overReplicatedCount": 0, + "misReplicatedCount": 0, + "containers": [{ + "containerID": 1, + "containerState": "MISSING", + "unhealthySince": 1605731029145, + "expectedReplicaCount": 3, + "actualReplicaCount": 0, + "replicaDeltaCount": 3, + "reason": null, + "keys": 7, + "pipelineID": "88646d32-a1aa-4e1a", + "replicas": [{ + "containerId": 1, + "datanodeHost": "localhost-1", + "firstReportTimestamp": 1605722960125, + "lastReportTimestamp": 1605731230509 + }, + ... + ] + }, + ... + ] + } + ``` + +* **/containers/unhealthy/:state** + + **URL Structure** + ``` + GET /api/v1/containers/unhealthy/:state + ``` + + **Parameters** + + * batchNum (optional) + + The batch number (like "page number") of results to return. + Passing 1, will return records 1 to limit. 2 will return + limit + 1 to 2 * limit, etc. + + * limit (optional) + + Only returns the limited number of results. The default limit is 1000. + + **Returns** + + Returns the UnhealthyContainerMetadata objects for the containers in the given state. + Possible unhealthy container states are `MISSING`, `MIS_REPLICATED`, `UNDER_REPLICATED`, `OVER_REPLICATED`. + The response structure is same as `/containers/unhealthy`. + +#### ClusterState + +* **/clusterState** + + **URL Structure** + ``` + GET /api/v1/clusterState + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns a summary of the current state of the Ozone cluster. + + ```json + { + "pipelines": 5, + "totalDatanodes": 4, + "healthyDatanodes": 4, + "storageReport": { + "capacity": 1081719668736, + "used": 1309212672, + "remaining": 597361258496 + }, + "containers": 26, + "volumes": 6, + "buckets": 26, + "keys": 25 + } + ``` + +#### Datanodes + +* **/datanodes** + + **URL Structure** + ``` + GET /api/v1/datanodes + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns all the datanodes in the cluster. + + ```json + { + "totalCount": 4, + "datanodes": [{ + "uuid": "f8f8cb45-3ab2-4123", + "hostname": "localhost-1", + "state": "HEALTHY", + "lastHeartbeat": 1605738400544, + "storageReport": { + "capacity": 270429917184, + "used": 358805504, + "remaining": 119648149504 + }, + "pipelines": [{ + "pipelineID": "b9415b20-b9bd-4225", + "replicationType": "RATIS", + "replicationFactor": 3, + "leaderNode": "localhost-2" + }, { + "pipelineID": "3bf4a9e9-69cc-4d20", + "replicationType": "RATIS", + "replicationFactor": 1, + "leaderNode": "localhost-1" + }], + "containers": 17, + "leaderCount": 1 + }, + ... + ] + } + ``` + +#### Pipelines + +* **/pipelines** + + **URL Structure** + ``` + GET /api/v1/pipelines + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns all the pipelines in the cluster. + + ```json + { + "totalCount": 5, + "pipelines": [{ + "pipelineId": "b9415b20-b9bd-4225", + "status": "OPEN", + "leaderNode": "localhost-1", + "datanodes": ["localhost-1", "localhost-2", "localhost-3"], + "lastLeaderElection": 0, + "duration": 23166128, + "leaderElections": 0, + "replicationType": "RATIS", + "replicationFactor": 3, + "containers": 0 + }, + ... + ] + } + ``` + +#### Tasks + +* **/task/status** + + **URL Structure** + ``` + GET /api/v1/task/status + ``` + + **Parameters** + + No parameters. + + **Returns** + + Returns the status of all the Recon tasks. + + ```json + [ + { + "taskName": "OmDeltaRequest", + "lastUpdatedTimestamp": 1605724099147, + "lastUpdatedSeqNumber": 186 + }, + ... + ] + ``` + +#### Utilization + +* **/utilization/fileCount** + + **URL Structure** + ``` + GET /api/v1/utilization/fileCount + ``` + + **Parameters** + + * volume (optional) + + Filters the results based on the given volume name. + + * bucket (optional) + + Filters the results based on the given bucket name. + + * fileSize (optional) + + Filters the results based on the given fileSize. + + **Returns** + + Returns the file counts within different file ranges with `fileSize` in the + response object being the upper cap for file size range. + + ```json + [{ + "volume": "vol-2-04168", + "bucket": "bucket-0-11685", + "fileSize": 1024, + "count": 1 + }, { + "volume": "vol-2-04168", + "bucket": "bucket-1-41795", + "fileSize": 1024, + "count": 1 + }, { + "volume": "vol-2-04168", + "bucket": "bucket-2-93377", + "fileSize": 1024, + "count": 1 + }, { + "volume": "vol-2-04168", + "bucket": "bucket-3-50336", + "fileSize": 1024, + "count": 2 + }] + ``` + +#### Metrics + +* **/metrics/:api** + + **URL Structure** + ``` + GET /api/v1/metrics/:api + ``` + + **Parameters** + + Refer to [Prometheus HTTP API Reference](https://prometheus.io/docs/prometheus/latest/querying/api/) + for complete documentation on querying. + + **Returns** + + This is a proxy endpoint for Prometheus and returns the same response as + the prometheus endpoint. + Example: /api/v1/metrics/query?query=ratis_leader_election_electionCount + + ```json + { + "status": "success", + "data": { + "resultType": "vector", + "result": [ + { + "metric": { + "__name__": "ratis_leader_election_electionCount", + "exported_instance": "33a5ac1d-8c65-4c74-a0b8-9314dfcccb42", + "group": "group-03CA9397D54B", + "instance": "ozone_datanode_1:9882", + "job": "ozone" + }, + "value": [ + 1599159384.455, + "5" + ] + } + ] + } + } + ``` + \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java index c05143eb3c3b..813baf55071a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskConfig.java @@ -53,8 +53,8 @@ public void setPipelineSyncTaskInterval(Duration interval) { defaultValue = "300s", tags = { ConfigTag.RECON, ConfigTag.OZONE }, description = "The time interval of the periodic check for " + - "containers with zero replicas in the cluster as reported by " + - "Datanodes." + "unhealthy containers in the cluster as reported " + + "by Datanodes." ) private long missingContainerTaskInterval = Duration.ofMinutes(5).toMillis(); From 54cca0bf330460f4aa23d9275d732aaf11c28044 Mon Sep 17 00:00:00 2001 From: micah zhao Date: Wed, 25 Nov 2020 10:00:57 +0800 Subject: [PATCH 37/51] HDDS-4308. Fix issue with quota update (#1489) * Fix issue with quota update * trigger new CI check * fix review issues. * fix review issues. --- hadoop-hdds/docs/content/feature/Quota.md | 9 +- hadoop-hdds/docs/content/feature/Quota.zh.md | 8 +- .../hadoop/ozone/client/OzoneVolume.java | 17 - .../hadoop/ozone/client/rpc/RpcClient.java | 10 +- .../hadoop/ozone/om/helpers/OmBucketInfo.java | 22 +- .../hadoop/ozone/om/helpers/OmVolumeArgs.java | 33 +- .../rpc/TestOzoneRpcClientAbstract.java | 296 +----------------- .../src/main/proto/OmClientProtocol.proto | 1 - .../bucket/OMBucketSetPropertyRequest.java | 15 +- .../om/request/file/OMFileCreateRequest.java | 8 +- .../request/key/OMAllocateBlockRequest.java | 16 +- .../om/request/key/OMKeyCommitRequest.java | 10 +- .../om/request/key/OMKeyCreateRequest.java | 7 +- .../om/request/key/OMKeyDeleteRequest.java | 7 +- .../ozone/om/request/key/OMKeyRequest.java | 23 +- .../om/request/key/OMKeysDeleteRequest.java | 7 +- .../S3MultipartUploadAbortRequest.java | 5 +- .../S3MultipartUploadCommitPartRequest.java | 10 +- .../response/key/OMAllocateBlockResponse.java | 4 - .../om/response/key/OMKeyCommitResponse.java | 4 - .../om/response/key/OMKeyCreateResponse.java | 4 - .../om/response/key/OMKeyDeleteResponse.java | 4 - .../om/response/key/OMKeysDeleteResponse.java | 4 - .../S3MultipartUploadAbortResponse.java | 4 - .../S3MultipartUploadCommitPartResponse.java | 5 +- .../TestOMBucketSetPropertyRequest.java | 3 +- .../volume/TestOMVolumeSetQuotaRequest.java | 3 +- 27 files changed, 84 insertions(+), 455 deletions(-) diff --git a/hadoop-hdds/docs/content/feature/Quota.md b/hadoop-hdds/docs/content/feature/Quota.md index 5be9f4db4d0c..933bbb50aec3 100644 --- a/hadoop-hdds/docs/content/feature/Quota.md +++ b/hadoop-hdds/docs/content/feature/Quota.md @@ -31,7 +31,12 @@ So far, we know that Ozone allows users to create volumes, buckets, and keys. A ## Currently supported 1. Storage Space level quota -Administrators should be able to define how much storage space a Volume or Bucket can use. +Administrators should be able to define how much storage space a Volume or Bucket can use. The following Settings for Storage space quota are currently supported: +a. By default, the quota for volume and bucket is not enabled. +b. When volume quota is enabled, the total size of bucket quota cannot exceed volume. +c. Bucket quota can be set separately without enabling Volume quota. The size of bucket quota is unrestricted at this point. +d. Volume quota is not currently supported separately, and volume quota takes effect only if bucket quota is set. Because ozone only check the usedBytes of the bucket when we write the key. + ## Client usage ### Storage Space level quota @@ -59,7 +64,7 @@ bin/ozone sh bucket setquota --space-quota 10GB /volume1/bucket1 ``` This behavior changes the quota for Bucket1 to 10GB -A bucket quota should not be greater than its Volume quota. Let's look at an example. If we have a 10MB Volume and create five buckets under that Volume with a quota of 5MB, the total quota is 25MB. In this case, the bucket creation will always succeed, and we check the quota for bucket and volume when the data is actually written. Each write needs to check whether the current bucket is exceeding the limit and the current total volume usage is exceeding the limit. +Total bucket quota should not be greater than its Volume quota. If we have a 10MB Volume, The sum of the sizes of all buckets under this volume cannot exceed 10MB, otherwise the bucket set quota fails. #### Clear the quota for Volume1. The Bucket cleanup command is similar. ```shell diff --git a/hadoop-hdds/docs/content/feature/Quota.zh.md b/hadoop-hdds/docs/content/feature/Quota.zh.md index 4cc1371668d6..b3f0c3c3187e 100644 --- a/hadoop-hdds/docs/content/feature/Quota.zh.md +++ b/hadoop-hdds/docs/content/feature/Quota.zh.md @@ -29,7 +29,11 @@ menu: ## 目前支持的 1. Storage space级别配额 - 管理员应该能够定义一个Volume或Bucket可以使用多少存储空间。 + 管理员应该能够定义一个Volume或Bucket可以使用多少存储空间。目前支持以下storage space quota的设置: + a. 默认情况下volume和bucket的quota不启用。 + b. 当volume quota启用时,bucket quota的总大小不能超过volume。 + c. 可以在不启用volume quota的情况下单独给bucket设置quota。此时bucket quota的大小是不受限制的。 + d. 目前不支持单独设置volume quota,只有在设置了bucket quota的情况下volume quota才会生效。因为ozone在写入key时只检查bucket的usedBytes。 ## 客户端用法 ### Storage space级别配额 @@ -56,7 +60,7 @@ bin/ozone sh bucket setquota --space-quota 10GB /volume1/bucket1 ``` 该行为将bucket1的配额更改为10GB -一个bucket配额 不应大于其Volume的配额。让我们看一个例子,如果我们有一个10MB的Volume,并在该Volume下创建5个Bucket,配额为5MB,则总配额为25MB。在这种情况下,创建存储桶将始终成功,我们会在数据真正写入时检查bucket和volume的quota。每次写入需要检查当前bucket的是否超上限,当前总的volume使用量是否超上限。 +bucket的总配额 不应大于其Volume的配额。让我们看一个例子,如果我们有一个10MB的Volume,该volume下所有bucket的大小之和不能超过10MB,否则设置bucket quota将失败。 #### 清除Volume1的配额, Bucket清除命令与此类似 ```shell diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java index 0e9e94285423..b54692addd87 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneVolume.java @@ -85,8 +85,6 @@ public class OzoneVolume extends WithMetadata { private int listCacheSize; - private long usedBytes; - /** * Constructs OzoneVolume instance. * @param conf Configuration object. @@ -135,17 +133,6 @@ public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, this.modificationTime = Instant.ofEpochMilli(modificationTime); } - @SuppressWarnings("parameternumber") - public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, - String name, String admin, String owner, long quotaInBytes, - long quotaInCounts, long creationTime, long modificationTime, - List acls, Map metadata, - long usedBytes) { - this(conf, proxy, name, admin, owner, quotaInBytes, quotaInCounts, - creationTime, acls, metadata); - this.usedBytes = usedBytes; - } - @SuppressWarnings("parameternumber") public OzoneVolume(ConfigurationSource conf, ClientProtocol proxy, String name, String admin, String owner, long quotaInBytes, @@ -269,10 +256,6 @@ public List getAcls() { return acls; } - public long getUsedBytes() { - return usedBytes; - } - /** * Sets/Changes the owner of this Volume. * @param userName new owner diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index c96c3efbbafb..532a3f38fb65 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -312,8 +312,7 @@ public OzoneVolume getVolumeDetails(String volumeName) volume.getModificationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata(), - volume.getUsedBytes().sum()); + volume.getMetadata()); } @Override @@ -369,8 +368,7 @@ public List listVolumes(String user, String volumePrefix, volume.getModificationTime(), volume.getAclMap().ozoneAclGetProtobuf().stream(). map(OzoneAcl::fromProtobuf).collect(Collectors.toList()), - volume.getMetadata(), - volume.getUsedBytes().sum())) + volume.getMetadata())) .collect(Collectors.toList()); } @@ -625,7 +623,7 @@ public OzoneBucket getBucketDetails( .getEncryptionKeyInfo().getKeyName() : null, bucketInfo.getSourceVolume(), bucketInfo.getSourceBucket(), - bucketInfo.getUsedBytes().sum(), + bucketInfo.getUsedBytes(), bucketInfo.getQuotaInBytes(), bucketInfo.getQuotaInCounts() ); @@ -652,7 +650,7 @@ public List listBuckets(String volumeName, String bucketPrefix, .getEncryptionKeyInfo().getKeyName() : null, bucket.getSourceVolume(), bucket.getSourceBucket(), - bucket.getUsedBytes().sum(), + bucket.getUsedBytes(), bucket.getQuotaInBytes(), bucket.getQuotaInCounts())) .collect(Collectors.toList()); diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java index d25cb1257648..a23bbfc1dc06 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmBucketInfo.java @@ -20,7 +20,6 @@ import java.util.ArrayList; import java.util.BitSet; -import java.util.concurrent.atomic.LongAdder; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; @@ -80,7 +79,7 @@ public final class OmBucketInfo extends WithObjectID implements Auditable { private final String sourceBucket; - private final LongAdder usedBytes = new LongAdder(); + private long usedBytes; private long quotaInBytes; private long quotaInCounts; @@ -132,7 +131,7 @@ private OmBucketInfo(String volumeName, this.bekInfo = bekInfo; this.sourceVolume = sourceVolume; this.sourceBucket = sourceBucket; - this.usedBytes.add(usedBytes); + this.usedBytes = usedBytes; this.quotaInBytes = quotaInBytes; this.quotaInCounts = quotaInCounts; } @@ -241,9 +240,14 @@ public String getSourceBucket() { } - public LongAdder getUsedBytes() { + public long getUsedBytes() { return usedBytes; } + + public void incrUsedBytes(long bytes) { + this.usedBytes += bytes; + } + public long getQuotaInBytes() { return quotaInBytes; } @@ -324,7 +328,7 @@ public Builder toBuilder() { .setSourceBucket(sourceBucket) .setAcls(acls) .addAllMetadata(metadata) - .setUsedBytes(usedBytes.sum()) + .setUsedBytes(usedBytes) .setQuotaInBytes(quotaInBytes) .setQuotaInCounts(quotaInCounts); } @@ -489,7 +493,7 @@ public BucketInfo getProtobuf() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setUsedBytes(usedBytes.sum()) + .setUsedBytes(usedBytes) .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) .setQuotaInBytes(quotaInBytes) .setQuotaInCounts(quotaInCounts); @@ -557,7 +561,7 @@ public String getObjectInfo() { ", isVersionEnabled='" + isVersionEnabled + "'" + ", storageType='" + storageType + "'" + ", creationTime='" + creationTime + "'" + - ", usedBytes='" + usedBytes.sum() + "'" + + ", usedBytes='" + usedBytes + "'" + ", quotaInBytes='" + quotaInBytes + "'" + ", quotaInCounts='" + quotaInCounts + '\'' + sourceInfo + @@ -582,7 +586,7 @@ public boolean equals(Object o) { storageType == that.storageType && objectID == that.objectID && updateID == that.updateID && - usedBytes.sum() == that.usedBytes.sum() && + usedBytes == that.usedBytes && Objects.equals(sourceVolume, that.sourceVolume) && Objects.equals(sourceBucket, that.sourceBucket) && Objects.equals(metadata, that.metadata) && @@ -609,7 +613,7 @@ public String toString() { ", objectID=" + objectID + ", updateID=" + updateID + ", metadata=" + metadata + - ", usedBytes=" + usedBytes.sum() + + ", usedBytes=" + usedBytes + ", quotaInBytes=" + quotaInBytes + ", quotaInCounts=" + quotaInCounts + '}'; diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java index fa7b69725656..13c67c8ad9bb 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmVolumeArgs.java @@ -23,7 +23,6 @@ import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.concurrent.atomic.LongAdder; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; @@ -47,7 +46,6 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { private long quotaInBytes; private long quotaInCounts; private final OmOzoneAclMap aclMap; - private final LongAdder usedBytes = new LongAdder(); /** * Private constructor, constructed via builder. @@ -57,10 +55,9 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { * @param quotaInBytes - Volume Quota in bytes. * @param quotaInCounts - Volume Quota in counts. * @param metadata - metadata map for custom key/value data. - * @param usedBytes - Volume Quota Usage in bytes. * @param aclMap - User to access rights map. * @param creationTime - Volume creation time. - * @param objectID - ID of this object. + * @param objectID - ID of this object. * @param updateID - A sequence number that denotes the last update on this * object. This is a monotonically increasing number. */ @@ -68,15 +65,14 @@ public final class OmVolumeArgs extends WithObjectID implements Auditable { "builder."}) private OmVolumeArgs(String adminName, String ownerName, String volume, long quotaInBytes, long quotaInCounts, Map metadata, - long usedBytes, OmOzoneAclMap aclMap, long creationTime, - long modificationTime, long objectID, long updateID) { + OmOzoneAclMap aclMap, long creationTime, long modificationTime, + long objectID, long updateID) { this.adminName = adminName; this.ownerName = ownerName; this.volume = volume; this.quotaInBytes = quotaInBytes; this.quotaInCounts = quotaInCounts; this.metadata = metadata; - this.usedBytes.add(usedBytes); this.aclMap = aclMap; this.creationTime = creationTime; this.modificationTime = modificationTime; @@ -177,10 +173,6 @@ public OmOzoneAclMap getAclMap() { return aclMap; } - public LongAdder getUsedBytes() { - return usedBytes; - } - /** * Returns new builder class that builds a OmVolumeArgs. * @@ -204,8 +196,6 @@ public Map toAuditMap() { String.valueOf(this.quotaInCounts)); auditMap.put(OzoneConsts.OBJECT_ID, String.valueOf(this.getObjectID())); auditMap.put(OzoneConsts.UPDATE_ID, String.valueOf(this.getUpdateID())); - auditMap.put(OzoneConsts.USED_BYTES, - String.valueOf(this.usedBytes)); return auditMap; } @@ -241,7 +231,6 @@ public static class Builder { private OmOzoneAclMap aclMap; private long objectID; private long updateID; - private long usedBytes; /** * Sets the Object ID for this Object. @@ -319,11 +308,6 @@ public Builder addAllMetadata(Map additionalMetaData) { return this; } - public Builder setUsedBytes(long quotaUsage) { - this.usedBytes = quotaUsage; - return this; - } - public Builder addOzoneAcls(OzoneAclInfo acl) throws IOException { aclMap.addAcl(acl); return this; @@ -338,8 +322,8 @@ public OmVolumeArgs build() { Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(volume); return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInCounts, metadata, usedBytes, aclMap, creationTime, - modificationTime, objectID, updateID); + quotaInCounts, metadata, aclMap, creationTime, modificationTime, + objectID, updateID); } } @@ -359,7 +343,6 @@ public VolumeInfo getProtobuf() { .setModificationTime(modificationTime) .setObjectID(objectID) .setUpdateID(updateID) - .setUsedBytes(usedBytes.sum()) .build(); } @@ -374,7 +357,6 @@ public static OmVolumeArgs getFromProtobuf(VolumeInfo volInfo) volInfo.getQuotaInBytes(), volInfo.getQuotaInCounts(), KeyValueUtil.getFromProtobuf(volInfo.getMetadataList()), - volInfo.getUsedBytes(), aclMap, volInfo.getCreationTime(), volInfo.getModificationTime(), @@ -390,7 +372,6 @@ public String getObjectInfo() { ", owner='" + ownerName + '\'' + ", creationTime='" + creationTime + '\'' + ", quotaInBytes='" + quotaInBytes + '\'' + - ", usedBytes='" + usedBytes.sum() + '\'' + '}'; } @@ -406,7 +387,7 @@ public OmVolumeArgs copyObject() { OmOzoneAclMap cloneAclMap = aclMap.copyObject(); return new OmVolumeArgs(adminName, ownerName, volume, quotaInBytes, - quotaInCounts, cloneMetadata, usedBytes.sum(), cloneAclMap, - creationTime, modificationTime, objectID, updateID); + quotaInCounts, cloneMetadata, cloneAclMap, creationTime, + modificationTime, objectID, updateID); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java index fd61111f1e02..88222f1092e1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java @@ -815,7 +815,6 @@ public void testPutKey() throws IOException { } @Test - @SuppressWarnings("methodlength") public void testCheckUsedBytesQuota() throws IOException { String volumeName = UUID.randomUUID().toString(); String bucketName = UUID.randomUUID().toString(); @@ -829,74 +828,10 @@ public void testCheckUsedBytesQuota() throws IOException { store.createVolume(volumeName); volume = store.getVolume(volumeName); - - // Test volume quota. - // Set quota In Bytes for a smaller value - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota("1 Bytes", 100)); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); - // Test volume quota: write key. - // The remaining quota does not satisfy a block size, so the write fails. - try { - writeKey(bucket, UUID.randomUUID().toString(), ONE, value, valueLength); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // Write failed, volume usedBytes should be 0 - Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write file. - // The remaining quota does not satisfy a block size, so the write fails. - try { - writeFile(bucket, UUID.randomUUID().toString(), ONE, value, 0); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // Write failed, volume usedBytes should be 0 - Assert.assertEquals(0L, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write key(with two blocks), test allocateBlock fails. - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota(blockSize + "Bytes", 100)); - try { - OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); - for (int i = 0; i <= blockSize / value.length(); i++) { - out.write(value.getBytes()); - } - out.close(); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // AllocateBlock failed, volume usedBytes should be 1 * blockSize. - Assert.assertEquals(blockSize, store.getVolume(volumeName).getUsedBytes()); - - // Test volume quota: write large key(with five blocks), the first four - // blocks will succeed,while the later block will fail. - store.getVolume(volumeName).setQuota( - OzoneQuota.parseQuota(5 * blockSize + "Bytes", 100)); - try { - OzoneOutputStream out = bucket.createKey(UUID.randomUUID().toString(), - valueLength, STAND_ALONE, ONE, new HashMap<>()); - for (int i = 0; i <= (4 * blockSize) / value.length(); i++) { - out.write(value.getBytes()); - } - out.close(); - } catch (IOException ex) { - countException++; - GenericTestUtils.assertExceptionContains("QUOTA_EXCEEDED", ex); - } - // AllocateBlock failed, volume usedBytes should be (4 + 1) * blockSize - Assert.assertEquals(5 * blockSize, - store.getVolume(volumeName).getUsedBytes()); - // Test bucket quota. - // Set quota In Bytes for a smaller value store.getVolume(volumeName).setQuota( OzoneQuota.parseQuota(Long.MAX_VALUE + " Bytes", 100)); bucketName = UUID.randomUUID().toString(); @@ -947,229 +882,7 @@ public void testCheckUsedBytesQuota() throws IOException { Assert.assertEquals(4 * blockSize, store.getVolume(volumeName).getBucket(bucketName).getUsedBytes()); - Assert.assertEquals(7, countException); - } - - @Test - @SuppressWarnings("methodlength") - public void testVolumeUsedBytes() throws IOException { - String volumeName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - OzoneVolume volume = null; - OzoneBucket bucket = null; - - int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( - OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - - // Write data larger than one block size. - String value = generateData(blockSize + 100, - (byte) RandomUtils.nextLong()).toString(); - - int valueLength = value.getBytes().length; - long currentQuotaUsage = 0L; - store.createVolume(volumeName); - volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); - volume.createBucket(bucketName); - bucket = volume.getBucket(bucketName); - - //Case1: Test the volumeUsedBytes of ONE replications. - String keyName1 = UUID.randomUUID().toString(); - writeKey(bucket, keyName1, ONE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength, volume.getUsedBytes()); - Assert.assertEquals(valueLength, bucket.getUsedBytes()); - currentQuotaUsage += valueLength; - - // Case2: Test overwrite the same KeyName under ONE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName2 = UUID.randomUUID().toString(); - writeKey(bucket, keyName2, ONE, value, valueLength); - // Overwrite the keyName2 - writeKey(bucket, keyName2, ONE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 2; - - // Case3: Test the volumeUsedBytes of THREE replications. - String keyName3 = UUID.randomUUID().toString(); - writeKey(bucket, keyName3, THREE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3; - - // Case4: Test overwrite the same KeyName under THREE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName4 = UUID.randomUUID().toString(); - writeKey(bucket, keyName4, THREE, value, valueLength); - // Overwrite the keyName4 - writeKey(bucket, keyName4, THREE, value, valueLength); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3 * 2; - - //Case5: Do not specify the value Length, simulate HDFS api writing. - // Test the volumeUsedBytes of ONE replications. - String keyName5 = UUID.randomUUID().toString(); - writeFile(bucket, keyName5, ONE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength; - - // Case6: Do not specify the value Length, simulate HDFS api writing. - // Test overwrite the same KeyName under ONE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName6 = UUID.randomUUID().toString(); - writeFile(bucket, keyName6, ONE, value, 0); - // Overwrite the keyName6 - writeFile(bucket, keyName6, ONE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 2; - - // Case7: Do not specify the value Length, simulate HDFS api writing. - // Test the volumeUsedBytes of THREE replications. - String keyName7 = UUID.randomUUID().toString(); - writeFile(bucket, keyName7, THREE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3; - - // Case8: Do not specify the value Length, simulate HDFS api writing. - // Test overwrite the same KeyName under THREE Replicates, the - // keyLocationVersions of the Key is 2. - String keyName8 = UUID.randomUUID().toString(); - writeFile(bucket, keyName8, THREE, value, 0); - // Overwrite the keyName8 - writeFile(bucket, keyName8, THREE, value, 0); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - volume.getUsedBytes()); - Assert.assertEquals(valueLength * 3 * 2 + currentQuotaUsage, - bucket.getUsedBytes()); - currentQuotaUsage += valueLength * 3 * 2; - - // Case9: Test volumeUsedBytes when delete key of ONE replications. - bucket.deleteKey(keyName1); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(currentQuotaUsage - valueLength, - volume.getUsedBytes()); - Assert.assertEquals(currentQuotaUsage - valueLength, - bucket.getUsedBytes()); - currentQuotaUsage -= valueLength; - - // Case10: Test volumeUsedBytes when delete key of THREE - // replications. - bucket.deleteKey(keyName3); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(currentQuotaUsage - valueLength * 3, - volume.getUsedBytes()); - Assert.assertEquals(currentQuotaUsage - valueLength * 3, - bucket.getUsedBytes()); - currentQuotaUsage -= valueLength * 3; - - // Case11: Test volumeUsedBytes when Test Delete keys. At this - // point all keys are deleted, volumeUsedBytes should be 0 - List keyList = new ArrayList<>(); - keyList.add(keyName2); - keyList.add(keyName4); - keyList.add(keyName5); - keyList.add(keyName6); - keyList.add(keyName7); - keyList.add(keyName8); - bucket.deleteKeys(keyList); - volume = store.getVolume(volumeName); - bucket = volume.getBucket(bucketName); - Assert.assertEquals(0, volume.getUsedBytes()); - Assert.assertEquals(0, bucket.getUsedBytes()); - } - - @Test - public void testVolumeQuotaWithMultiThread() throws IOException, - InterruptedException{ - String volumeName = UUID.randomUUID().toString(); - - int blockSize = (int) ozoneManager.getConfiguration().getStorageSize( - OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); - // Write data larger than one block size. - String value = generateData(blockSize + 100, - (byte) RandomUtils.nextLong()).toString(); - - int valueLength = value.getBytes().length; - long currentQuotaUsage = 0L; - store.createVolume(volumeName); - OzoneVolume volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); - - CountDownLatch latch = new CountDownLatch(2); - AtomicInteger failCount = new AtomicInteger(0); - - // Multiple threads write different buckets and ensure that the volume - // quota is correct. - Runnable r = () -> { - try { - for (int i = 0; i < 10; i++) { - String keyName = UUID.randomUUID().toString(); - String bucketName = UUID.randomUUID().toString(); - volume.createBucket(bucketName); - OzoneBucket bucket = volume.getBucket(bucketName); - OzoneOutputStream out = bucket.createKey(keyName, valueLength, - STAND_ALONE, ONE, new HashMap<>()); - out.write(value.getBytes()); - out.close(); - } - latch.countDown(); - } catch (IOException ex) { - latch.countDown(); - failCount.incrementAndGet(); - } - }; - - Thread thread1 = new Thread(r); - Thread thread2 = new Thread(r); - - thread1.start(); - thread2.start(); - - latch.await(6000, TimeUnit.SECONDS); - - if (failCount.get() > 0) { - fail("testVolumeQuotaWithMultiThread failed"); - } - currentQuotaUsage += valueLength * 10 * 2; - Assert.assertEquals(currentQuotaUsage, - store.getVolume(volumeName).getUsedBytes()); - + Assert.assertEquals(3, countException); } private void writeKey(OzoneBucket bucket, String keyName, @@ -1203,8 +916,6 @@ public void testUsedBytesWithUploadPart() throws IOException { store.createVolume(volumeName); OzoneVolume volume = store.getVolume(volumeName); - // The initial value should be 0 - Assert.assertEquals(0L, volume.getUsedBytes()); volume.createBucket(bucketName); OzoneBucket bucket = volume.getBucket(bucketName); OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName, @@ -1223,14 +934,11 @@ public void testUsedBytesWithUploadPart() throws IOException { sampleData.length()); ozoneOutputStream.close(); - Assert.assertEquals(valueLength, store.getVolume(volumeName) - .getUsedBytes()); Assert.assertEquals(valueLength, store.getVolume(volumeName) .getBucket(bucketName).getUsedBytes()); - // Abort uploaded partKey and the usedBytes of volume should be 0. + // Abort uploaded partKey and the usedBytes of bucket should be 0. bucket.abortMultipartUpload(keyName, uploadID); - Assert.assertEquals(0, store.getVolume(volumeName).getUsedBytes()); Assert.assertEquals(0, store.getVolume(volumeName) .getBucket(bucketName).getUsedBytes()); } diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto index b347dc1b9df2..613838f09eba 100644 --- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto +++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto @@ -364,7 +364,6 @@ message VolumeInfo { optional uint64 updateID = 9; optional uint64 modificationTime = 10; optional uint64 quotaInCounts = 11; - optional uint64 usedBytes = 12; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 583facbc0fca..415466138e5b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -48,16 +48,11 @@ import org.apache.hadoop.ozone.om.response.bucket.OMBucketSetPropertyResponse; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .BucketArgs; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .OMResponse; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyRequest; -import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos - .SetBucketPropertyResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.BucketArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SetBucketPropertyResponse; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 6ca3cc374609..5c8dc06e3169 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -285,7 +285,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, * ozoneManager.getScmBlockSize() * omKeyInfo.getFactor().getNumber(); checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Add to cache entry can be done outside of lock for this openKey. // Even if bucket gets deleted, when commitKey we shall identify if @@ -300,9 +299,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, bucketName, Optional.absent(), Optional.of(missingParentInfos), trxnLogIndex); - // update usedBytes atomically. - omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); // Prepare response omResponse.setCreateFileResponse(CreateFileResponse.newBuilder() @@ -311,7 +308,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateFile); omClientResponse = new OMFileCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index 194e7ef9de1c..1fd4b0754679 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; /** * Handles allocate block request. @@ -168,6 +169,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, IOException exception = null; OmVolumeArgs omVolumeArgs = null; OmBucketInfo omBucketInfo = null; + boolean acquiredLock = false; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); @@ -195,13 +197,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, List newLocationList = Collections.singletonList( OmKeyLocationInfo.getFromProtobuf(blockLocation)); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); + + acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK, + volumeName, bucketName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // check bucket and volume quota long preAllocatedSpace = newLocationList.size() * ozoneManager.getScmBlockSize() * openKeyInfo.getFactor().getNumber(); checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Append new block openKeyInfo.appendNewBlocks(newLocationList, false); @@ -216,14 +220,12 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(openKeyName), new CacheValue<>(Optional.of(openKeyInfo), trxnLogIndex)); - // update usedBytes atomically. - omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder() .setKeyLocation(blockLocation).build()); omClientResponse = new OMAllocateBlockResponse(omResponse.build(), - openKeyInfo, clientID, omVolumeArgs, omBucketInfo); + openKeyInfo, clientID, omVolumeArgs, omBucketInfo.copyObject()); LOG.debug("Allocated block for Volume:{}, Bucket:{}, OpenKey:{}", volumeName, bucketName, openKeyName); @@ -237,6 +239,10 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); + if (acquiredLock) { + omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName, + bucketName); + } } auditLog(auditLogger, buildAuditMessage(OMAction.ALLOCATE_BLOCK, auditMap, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index b1d47de0d281..c914bc0e512c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -158,7 +158,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - // Check for directory exists with same name, if it exists throw error. + // Check for directory exists with same name, if it exists throw error. if (ozoneManager.getEnableFileSystemPaths()) { if (checkDirectoryAlreadyExists(volumeName, bucketName, keyName, omMetadataManager)) { @@ -167,7 +167,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, } } - omKeyInfo = omMetadataManager.getOpenKeyTable().get(dbOpenKey); if (omKeyInfo == null) { throw new OMException("Failed to commit key, as " + dbOpenKey + @@ -196,18 +195,17 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. long correctedSpace = omKeyInfo.getDataSize() * factor - locationInfoList.size() * scmBlockSize * factor; - omVolumeArgs.getUsedBytes().add(correctedSpace); - omBucketInfo.getUsedBytes().add(correctedSpace); + omBucketInfo.incrUsedBytes(correctedSpace); omClientResponse = new OMKeyCommitResponse(omResponse.build(), - omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, omBucketInfo); + omKeyInfo, dbOzoneKey, dbOpenKey, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index 3205fbfdbe27..86c62abb2470 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -301,7 +301,6 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, * omKeyInfo.getFactor().getNumber(); // check bucket and volume quota checkBucketQuotaInBytes(omBucketInfo, preAllocatedSpace); - checkVolumeQuotaInBytes(omVolumeArgs, preAllocatedSpace); // Add to cache entry can be done outside of lock for this openKey. // Even if bucket gets deleted, when commitKey we shall identify if @@ -310,8 +309,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, new CacheKey<>(dbOpenKeyName), new CacheValue<>(Optional.of(omKeyInfo), trxnLogIndex)); - omVolumeArgs.getUsedBytes().add(preAllocatedSpace); - omBucketInfo.getUsedBytes().add(preAllocatedSpace); + omBucketInfo.incrUsedBytes(preAllocatedSpace); // Prepare response omResponse.setCreateKeyResponse(CreateKeyResponse.newBuilder() @@ -320,7 +318,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, .setOpenVersion(openVersion).build()) .setCmdType(Type.CreateKey); omClientResponse = new OMKeyCreateResponse(omResponse.build(), - omKeyInfo, missingParentInfos, clientID, omVolumeArgs, omBucketInfo); + omKeyInfo, missingParentInfos, clientID, omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index 43d9c2ddbb0c..a99c02bc0094 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -147,9 +147,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); long quotaReleased = sumBlockLengths(omKeyInfo); - // update usedBytes atomically. - omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); // No need to add cache entries to delete table. As delete table will // be used by DeleteKeyService only, not used for any client response @@ -158,7 +156,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new OMKeyDeleteResponse(omResponse .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(), - omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + omKeyInfo, ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java index f1e2bfcfe36c..553f7f0fd33b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRequest.java @@ -569,27 +569,6 @@ protected FileEncryptionInfo getFileEncryptionInfo(KeyArgs keyArgs) { return encryptionInfo; } - /** - * Check volume quota in bytes. - * @param omVolumeArgs - * @param allocateSize - * @throws IOException - */ - protected void checkVolumeQuotaInBytes(OmVolumeArgs omVolumeArgs, - long allocateSize) throws IOException { - if (omVolumeArgs.getQuotaInBytes() > OzoneConsts.QUOTA_RESET) { - long usedBytes = omVolumeArgs.getUsedBytes().sum(); - long quotaInBytes = omVolumeArgs.getQuotaInBytes(); - if (quotaInBytes - usedBytes < allocateSize) { - throw new OMException("The DiskSpace quota of volume:" - + omVolumeArgs.getVolume() + "exceeded: quotaInBytes: " - + quotaInBytes + " Bytes but diskspace consumed: " + (usedBytes - + allocateSize) + " Bytes.", - OMException.ResultCodes.QUOTA_EXCEEDED); - } - } - } - /** * Check bucket quota in bytes. * @param omBucketInfo @@ -599,7 +578,7 @@ protected void checkVolumeQuotaInBytes(OmVolumeArgs omVolumeArgs, protected void checkBucketQuotaInBytes(OmBucketInfo omBucketInfo, long allocateSize) throws IOException { if (omBucketInfo.getQuotaInBytes() > OzoneConsts.QUOTA_RESET) { - long usedBytes = omBucketInfo.getUsedBytes().sum(); + long usedBytes = omBucketInfo.getUsedBytes(); long quotaInBytes = omBucketInfo.getQuotaInBytes(); if (quotaInBytes - usedBytes < allocateSize) { throw new OMException("The DiskSpace quota of bucket:" diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index 71e15f541819..3dc22e8fa25b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -170,16 +170,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled()); quotaReleased += sumBlockLengths(omKeyInfo); } - // update usedBytes atomically. - omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); omClientResponse = new OMKeysDeleteResponse(omResponse .setDeleteKeysResponse(DeleteKeysResponse.newBuilder() .setStatus(deleteStatus).setUnDeletedKeys(unDeletedKeys)) .setStatus(deleteStatus ? OK : PARTIAL_DELETE) .setSuccess(deleteStatus).build(), omKeyInfoList, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 8b53e7045dcc..42dc85d705e4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -152,8 +152,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, quotaReleased += iterPartKeyInfo.getPartKeyInfo().getDataSize() * keyFactor; } - omVolumeArgs.getUsedBytes().add(-quotaReleased); - omBucketInfo.getUsedBytes().add(-quotaReleased); + omBucketInfo.incrUsedBytes(-quotaReleased); // Update cache of openKeyTable and multipartInfo table. // No need to add the cache entries to delete table, as the entries @@ -169,7 +168,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omResponse.setAbortMultiPartUploadResponse( MultipartUploadAbortResponse.newBuilder()).build(), multipartKey, multipartKeyInfo, ozoneManager.isRatisEnabled(), - omVolumeArgs, omBucketInfo); + omVolumeArgs, omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index f471de4eab47..78c8623ebf3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -118,6 +118,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Result result = null; OmVolumeArgs omVolumeArgs = null; OmBucketInfo omBucketInfo = null; + OmBucketInfo copyBucketInfo = null; try { keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); volumeName = keyArgs.getVolumeName(); @@ -215,15 +216,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, int factor = omKeyInfo.getFactor().getNumber(); omVolumeArgs = getVolumeInfo(omMetadataManager, volumeName); omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); - // update usedBytes atomically. // Block was pre-requested and UsedBytes updated when createKey and // AllocatedBlock. The space occupied by the Key shall be based on // the actual Key size, and the total Block size applied before should // be subtracted. long correctedSpace = omKeyInfo.getDataSize() * factor - keyArgs.getKeyLocationsList().size() * scmBlockSize * factor; - omVolumeArgs.getUsedBytes().add(correctedSpace); - omBucketInfo.getUsedBytes().add(correctedSpace); + omBucketInfo.incrUsedBytes(correctedSpace); omResponse.setCommitMultiPartUploadResponse( MultipartCommitUploadPartResponse.newBuilder() @@ -231,7 +230,8 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( omResponse.build(), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, + omBucketInfo.copyObject()); result = Result.SUCCESS; } catch (IOException ex) { @@ -240,7 +240,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omClientResponse = new S3MultipartUploadCommitPartResponse( createErrorOMResponse(omResponse, exception), multipartKey, openKey, multipartKeyInfo, oldPartKeyInfo, omKeyInfo, - ozoneManager.isRatisEnabled(), omVolumeArgs, omBucketInfo); + ozoneManager.isRatisEnabled(), omVolumeArgs, copyBucketInfo); } finally { addResponseToDoubleBuffer(trxnLogIndex, omClientResponse, omDoubleBufferHelper); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java index 3995b5572dab..acc43eef8981 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMAllocateBlockResponse.java @@ -72,10 +72,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java index aede2ec18e91..8e2f6dce8070 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCommitResponse.java @@ -76,10 +76,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getKeyTable().putWithBatch(batchOperation, ozoneKeyName, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java index 86224a1a0b6f..60f6bfe32504 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyCreateResponse.java @@ -100,10 +100,6 @@ protected void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getOpenKeyTable().putWithBatch(batchOperation, openKey, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java index f9c6d185f398..e85670154074 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeyDeleteResponse.java @@ -73,10 +73,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, addDeletionToBatch(omMetadataManager, batchOperation, keyTable, ozoneKey, omKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java index bf1a8ddfe387..00a23fcbbc86 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMKeysDeleteResponse.java @@ -89,10 +89,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, deleteKey, omKeyInfo); } - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java index 73ae49eeec76..b11a7327306d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadAbortResponse.java @@ -104,10 +104,6 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, omMetadataManager.getDeletedTable().putWithBatch(batchOperation, partKeyInfo.getPartName(), repeatedOmKeyInfo); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java index 7e8ac55a6dd5..496175fc3822 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCommitPartResponse.java @@ -151,10 +151,7 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, // safely delete part key info from open key table. omMetadataManager.getOpenKeyTable().deleteWithBatch(batchOperation, openKey); - // update volume usedBytes. - omMetadataManager.getVolumeTable().putWithBatch(batchOperation, - omMetadataManager.getVolumeKey(omVolumeArgs.getVolume()), - omVolumeArgs); + // update bucket usedBytes. omMetadataManager.getBucketTable().putWithBatch(batchOperation, omMetadataManager.getBucketKey(omVolumeArgs.getVolume(), diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java index c315ff0081a5..6011a973157b 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/bucket/TestOMBucketSetPropertyRequest.java @@ -148,7 +148,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( - "Total buckets quota in this volume should not be", ex); + "Total buckets quota in this volume should not be " + + "greater than volume quota", ex); } Assert.assertEquals(1, countException); } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java index f1e2400c7eea..340c2f5cee46 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/volume/TestOMVolumeSetQuotaRequest.java @@ -187,7 +187,8 @@ public void testValidateAndUpdateCacheWithQuota() throws Exception { } catch (IllegalArgumentException ex) { countException++; GenericTestUtils.assertExceptionContains( - "Total buckets quota in this volume should not be", ex); + "Total buckets quota in this volume should not be " + + "greater than volume quota", ex); } Assert.assertEquals(1, countException); } From a4cd12c8567245699ba5c4aea6d1865d8c299017 Mon Sep 17 00:00:00 2001 From: wycccccc <43372856+wycccccc@users.noreply.github.com> Date: Wed, 25 Nov 2020 15:44:55 +0800 Subject: [PATCH 38/51] HDDS-4471. GrpcOutputStream length can overflow (#1617) --- .../hadoop/ozone/container/replication/GrpcOutputStream.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java index 4303bb16bab8..c09c8f6743e7 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/GrpcOutputStream.java @@ -44,7 +44,7 @@ class GrpcOutputStream extends OutputStream { private final int bufferSize; - private int writtenBytes; + private long writtenBytes; GrpcOutputStream( StreamObserver responseObserver, From fdb373fd29ae4f71cb350a801d0666e95cc1fc34 Mon Sep 17 00:00:00 2001 From: Aryan Gupta <44232823+aryangupta1998@users.noreply.github.com> Date: Wed, 25 Nov 2020 15:45:08 +0530 Subject: [PATCH 39/51] HDDS-4487. SCM can avoid using RETRIABLE_DATANODE_COMMAND for datanode deletion commands. (#1621) --- .../apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java index 2d91bd60adad..fbf56543aeef 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java @@ -141,7 +141,7 @@ public EmptyTaskResult call() throws Exception { // We should stop caching new commands if num of un-processed // command is bigger than a limit, e.g 50. In case datanode goes // offline for sometime, the cached commands be flooded. - eventPublisher.fireEvent(SCMEvents.RETRIABLE_DATANODE_COMMAND, + eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dnId, new DeleteBlocksCommand(dnTXs))); if (LOG.isDebugEnabled()) { From d83ec1af165677d7c063ff574d32ae2abdd4ff5d Mon Sep 17 00:00:00 2001 From: Bharat Viswanadham Date: Wed, 25 Nov 2020 07:41:49 -0800 Subject: [PATCH 40/51] HDDS-4481. With HA OM can send deletion blocks to SCM multiple times. (#1608) --- .../hadoop/ozone/om/TestOzoneManagerHA.java | 47 +++++++++++ .../om/TestOzoneManagerHAKeyDeletion.java | 77 +++++++++++++++++++ .../ozone/om/TestOzoneManagerHAWithData.java | 37 --------- .../hadoop/ozone/om/KeyDeletingService.java | 19 ++++- .../om/ratis/OzoneManagerRatisServer.java | 35 ++++++++- 5 files changed, 172 insertions(+), 43 deletions(-) create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java index daca5c309678..4a2ccbb8e46f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHA.java @@ -26,10 +26,12 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.client.rpc.RpcClient; import org.apache.hadoop.ozone.om.ha.OMFailoverProxyProvider; @@ -51,9 +53,11 @@ import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_RETRY_INTERVAL_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; +import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_KEY_DELETING_LIMIT_PER_TASK; import static org.junit.Assert.fail; /** @@ -139,6 +143,12 @@ public void init() throws Exception { conf.setLong( OMConfigKeys.OZONE_OM_RATIS_SNAPSHOT_AUTO_TRIGGER_THRESHOLD_KEY, SNAPSHOT_THRESHOLD); + + /** + * config for key deleting service. + */ + conf.set(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, "10s"); + conf.set(OZONE_KEY_DELETING_LIMIT_PER_TASK, "2"); cluster = (MiniOzoneHAClusterImpl) MiniOzoneCluster.newHABuilder(conf) .setClusterId(clusterId) .setScmId(scmId) @@ -261,4 +271,41 @@ protected void createVolumeTest(boolean checkSuccess) throws Exception { } } } + + /** + * This method createFile and verifies the file is successfully created or + * not. + * @param ozoneBucket + * @param keyName + * @param data + * @param recursive + * @param overwrite + * @throws Exception + */ + protected void testCreateFile(OzoneBucket ozoneBucket, String keyName, + String data, boolean recursive, boolean overwrite) + throws Exception { + + OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName, + data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, + overwrite, recursive); + + ozoneOutputStream.write(data.getBytes(), 0, data.length()); + ozoneOutputStream.close(); + + OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName); + + Assert.assertEquals(keyName, ozoneKeyDetails.getName()); + Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName()); + Assert.assertEquals(ozoneBucket.getVolumeName(), + ozoneKeyDetails.getVolumeName()); + Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize()); + + OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); + + byte[] fileContent = new byte[data.getBytes().length]; + ozoneInputStream.read(fileContent); + Assert.assertEquals(data, new String(fileContent)); + } + } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java new file mode 100644 index 000000000000..52449a2b21fa --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAKeyDeletion.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ + +package org.apache.hadoop.ozone.om; + +import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.test.GenericTestUtils; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.fail; + +public class TestOzoneManagerHAKeyDeletion extends TestOzoneManagerHA { + + @Test + public void testKeyDeletion() throws Exception { + OzoneBucket ozoneBucket = setupBucket(); + String data = "random data"; + String keyName1 = "dir/file1"; + String keyName2 = "dir/file2"; + String keyName3 = "dir/file3"; + String keyName4 = "dir/file4"; + List keyList1 = new ArrayList<>(); + keyList1.add(keyName2); + keyList1.add(keyName3); + + testCreateFile(ozoneBucket, keyName1, data, true, false); + testCreateFile(ozoneBucket, keyName2, data, true, false); + testCreateFile(ozoneBucket, keyName3, data, true, false); + testCreateFile(ozoneBucket, keyName4, data, true, false); + + ozoneBucket.deleteKey(keyName1); + ozoneBucket.deleteKey(keyName2); + ozoneBucket.deleteKey(keyName3); + ozoneBucket.deleteKey(keyName4); + + // Now check delete table has entries been removed. + + OzoneManager ozoneManager = getCluster().getOMLeader(); + + KeyDeletingService keyDeletingService = + (KeyDeletingService) ozoneManager.getKeyManager().getDeletingService(); + + // Check on leader OM Count. + GenericTestUtils.waitFor(() -> + keyDeletingService.getRunCount().get() >= 2, 10000, 120000); + GenericTestUtils.waitFor(() -> + keyDeletingService.getDeletedKeyCount().get() == 4, 10000, 120000); + + // Check delete table is empty or not on all OMs. + getCluster().getOzoneManagersList().forEach((om) -> { + try { + GenericTestUtils.waitFor(() -> + !om.getMetadataManager().getDeletedTable().iterator().hasNext(), + 10000, 120000); + } catch (Exception ex) { + fail("TestOzoneManagerHAKeyDeletion failed"); + } + }); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java index aed84f5dd604..3a3d82be9d0f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithData.java @@ -23,7 +23,6 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; -import org.apache.hadoop.ozone.client.OzoneKeyDetails; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.VolumeArgs; @@ -251,42 +250,6 @@ public void testFileOperationsWithNonRecursive() throws Exception { } - /** - * This method createFile and verifies the file is successfully created or - * not. - * @param ozoneBucket - * @param keyName - * @param data - * @param recursive - * @param overwrite - * @throws Exception - */ - public void testCreateFile(OzoneBucket ozoneBucket, String keyName, - String data, boolean recursive, boolean overwrite) - throws Exception { - - OzoneOutputStream ozoneOutputStream = ozoneBucket.createFile(keyName, - data.length(), ReplicationType.RATIS, ReplicationFactor.ONE, - overwrite, recursive); - - ozoneOutputStream.write(data.getBytes(), 0, data.length()); - ozoneOutputStream.close(); - - OzoneKeyDetails ozoneKeyDetails = ozoneBucket.getKey(keyName); - - Assert.assertEquals(keyName, ozoneKeyDetails.getName()); - Assert.assertEquals(ozoneBucket.getName(), ozoneKeyDetails.getBucketName()); - Assert.assertEquals(ozoneBucket.getVolumeName(), - ozoneKeyDetails.getVolumeName()); - Assert.assertEquals(data.length(), ozoneKeyDetails.getDataSize()); - - OzoneInputStream ozoneInputStream = ozoneBucket.readKey(keyName); - - byte[] fileContent = new byte[data.getBytes().length]; - ozoneInputStream.read(fileContent); - Assert.assertEquals(data, new String(fileContent)); - } - @Test public void testMultipartUploadWithOneOmNodeDown() throws Exception { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java index 345a4463137c..466a55f18300 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyDeletingService.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; +import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.DeletedKeys; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PurgeKeysRequest; @@ -51,6 +52,8 @@ import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.ratis.protocol.ClientId; +import org.apache.ratis.protocol.Message; +import org.apache.ratis.protocol.RaftClientRequest; import org.apache.ratis.util.Preconditions; import org.rocksdb.RocksDBException; import org.slf4j.Logger; @@ -74,7 +77,7 @@ public class KeyDeletingService extends BackgroundService { private final OzoneManager ozoneManager; private final ScmBlockLocationProtocol scmClient; private final KeyManager manager; - private ClientId clientId = ClientId.randomId(); + private static ClientId clientId = ClientId.randomId(); private final int keyLimitPerTask; private final AtomicLong deletedKeyCount; private final AtomicLong runCount; @@ -264,7 +267,10 @@ public int submitPurgeKeysRequest(List results) { // Submit PurgeKeys request to OM try { - ozoneManager.getOmServerProtocol().submitRequest(null, omRequest); + RaftClientRequest raftClientRequest = + createRaftClientRequestForPurge(omRequest); + ozoneManager.getOmRatisServer().submitRequest(omRequest, + raftClientRequest); } catch (ServiceException e) { LOG.error("PurgeKey request failed. Will retry at next run."); return 0; @@ -274,6 +280,15 @@ public int submitPurgeKeysRequest(List results) { } } + private RaftClientRequest createRaftClientRequestForPurge( + OMRequest omRequest) { + return new RaftClientRequest(clientId, + ozoneManager.getOmRatisServer().getRaftPeerId(), + ozoneManager.getOmRatisServer().getRaftGroupId(), runCount.get(), + Message.valueOf(OMRatisHelper.convertRequestToByteString(omRequest)), + RaftClientRequest.writeRequestType(), null); + } + /** * Parse Volume and Bucket Name from ObjectKey and add it to given map of * keys to be purged per bucket. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java index 0bf58ba4d1f3..467764b2f6a0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerRatisServer.java @@ -34,6 +34,7 @@ import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantReadWriteLock; +import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.server.ServerUtils; @@ -86,6 +87,9 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.ipc.RpcConstants.DUMMY_CLIENT_ID; +import static org.apache.hadoop.ipc.RpcConstants.INVALID_CALL_ID; + /** * Creates a Ratis server endpoint for OM. */ @@ -126,15 +130,32 @@ private static long nextCallId() { public OMResponse submitRequest(OMRequest omRequest) throws ServiceException { RaftClientRequest raftClientRequest = createWriteRaftClientRequest(omRequest); - RaftClientReply raftClientReply; + RaftClientReply raftClientReply = submitRequestToRatis(raftClientRequest); + return processReply(omRequest, raftClientReply); + } + + /** + * API used internally from OzoneManager Server when requests needs to be + * submitted to ratis, where the crafted RaftClientRequest is passed along. + * @param omRequest + * @param raftClientRequest + * @return OMResponse + * @throws ServiceException + */ + public OMResponse submitRequest(OMRequest omRequest, + RaftClientRequest raftClientRequest) throws ServiceException { + RaftClientReply raftClientReply = submitRequestToRatis(raftClientRequest); + return processReply(omRequest, raftClientReply); + } + + private RaftClientReply submitRequestToRatis( + RaftClientRequest raftClientRequest) throws ServiceException { try { - raftClientReply = server.submitClientRequestAsync(raftClientRequest) + return server.submitClientRequestAsync(raftClientRequest) .get(); } catch (Exception ex) { throw new ServiceException(ex.getMessage(), ex); } - - return processReply(omRequest, raftClientReply); } /** @@ -144,6 +165,8 @@ public OMResponse submitRequest(OMRequest omRequest) throws ServiceException { * ratis server. */ private RaftClientRequest createWriteRaftClientRequest(OMRequest omRequest) { + Preconditions.checkArgument(Server.getClientId() != DUMMY_CLIENT_ID); + Preconditions.checkArgument(Server.getCallId() != INVALID_CALL_ID); return new RaftClientRequest( ClientId.valueOf(UUID.nameUUIDFromBytes(Server.getClientId())), server.getId(), raftGroupId, Server.getCallId(), @@ -714,4 +737,8 @@ public static String getOMRatisSnapshotDirectory(ConfigurationSource conf) { public TermIndex getLastAppliedTermIndex() { return omStateMachine.getLastAppliedTermIndex(); } + + public RaftGroupId getRaftGroupId() { + return raftGroupId; + } } From 1235430d78892cfcc2449f11581daac88b8f88bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Elek=2C=20M=C3=A1rton?= Date: Wed, 25 Nov 2020 17:58:00 +0100 Subject: [PATCH 41/51] HDDS-4512. Remove unused netty3 transitive dependency (#1627) --- hadoop-hdds/client/pom.xml | 5 ----- hadoop-hdds/container-service/pom.xml | 11 +++++++--- .../container/common/volume/HddsVolume.java | 4 ++-- hadoop-hdds/framework/pom.xml | 20 +++++++++++++++---- hadoop-hdds/hadoop-dependency-server/pom.xml | 6 ++++++ hadoop-hdds/hadoop-dependency-test/pom.xml | 12 +++++++++++ hadoop-hdds/pom.xml | 3 +++ hadoop-hdds/server-scm/pom.xml | 6 ++++++ hadoop-ozone/datanode/pom.xml | 2 +- hadoop-ozone/integration-test/pom.xml | 6 ++++++ hadoop-ozone/interface-storage/pom.xml | 14 +++++++++---- hadoop-ozone/ozone-manager/pom.xml | 6 ++++++ hadoop-ozone/pom.xml | 7 +++++++ hadoop-ozone/recon/pom.xml | 6 ++++++ hadoop-ozone/tools/pom.xml | 2 +- pom.xml | 6 ------ 16 files changed, 90 insertions(+), 26 deletions(-) diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index 608839e82dd6..e1b51e8bba98 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -51,11 +51,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - - io.netty - netty-all - - org.apache.hadoop hadoop-hdds-hadoop-dependency-test diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index b71f8e3471e7..aaa5302b4b60 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -46,6 +46,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-common test-jar + test org.apache.hadoop @@ -55,6 +56,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-client + + commons-codec + commons-codec + io.dropwizard.metrics metrics-core @@ -98,11 +103,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test - org.apache.hadoop - hadoop-hdfs + org.slf4j + slf4j-log4j12 test - test-jar + diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java index 66cd6573dc33..1dee1bac0e8a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java @@ -27,6 +27,8 @@ import java.util.concurrent.atomic.AtomicLong; import org.apache.hadoop.fs.StorageType; +import org.apache.hadoop.hdds.annotation.InterfaceAudience; +import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -40,8 +42,6 @@ import org.apache.hadoop.util.Time; import com.google.common.base.Preconditions; -import org.apache.yetus.audience.InterfaceAudience; -import org.apache.yetus.audience.InterfaceStability; import org.slf4j.Logger; import org.slf4j.LoggerFactory; diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 91eb43c83465..4f9866995750 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -51,6 +51,22 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop hadoop-hdds-hadoop-dependency-server + + org.eclipse.jetty + jetty-util + + + org.eclipse.jetty + jetty-server + + + org.eclipse.jetty + jetty-servlet + + + org.eclipse.jetty + jetty-webapp + ratis-server org.apache.ratis @@ -69,10 +85,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.eclipse.jetty - jetty-util - org.rocksdb rocksdbjni diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 06f0f87da478..642898885ae2 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -83,6 +83,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-hdfs ${hadoop.version} compile + + + io.netty + * + + diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 0dcbcc4fcaf0..c45421e95cd5 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -35,12 +35,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-common ${hadoop.version} test-jar + + + * + * + + org.apache.hadoop hadoop-hdfs ${hadoop.version} test-jar + + + * + * + + diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 6ebccf7bc8cf..f2f6c7d444fa 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -160,6 +160,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-hdds-common ${hdds.version} test-jar + test @@ -167,6 +168,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-hdds-container-service ${hdds.version} test-jar + test @@ -174,6 +176,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-hdds-server-scm test-jar ${hdds.version} + test diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index c007ef1ec214..179d3d742e21 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -90,6 +90,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test test-jar + + org.apache.hadoop + hadoop-hdds-common + test-jar + test + org.hamcrest diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 29f23970167a..3cf2e850b22c 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -40,7 +40,7 @@ org.apache.hadoop - hadoop-hdfs + hadoop-hdds-hadoop-dependency-server compile diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index ebfe1c0057c8..7291540857b6 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -92,6 +92,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test test-jar + + org.apache.hadoop + hadoop-hdds-common + test-jar + test + junit junit diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index 5c9ae432baad..28ab75f0db0c 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -13,8 +13,8 @@ limitations under the License. See accompanying LICENSE file. --> 4.0.0 @@ -51,14 +51,20 @@ org.apache.hadoop - hadoop-hdds-hadoop-dependency-test + hadoop-hdds-common + test-jar test org.apache.hadoop hadoop-hdds-server-scm - test test-jar + test + + + org.apache.hadoop + hadoop-hdds-test-utils + test diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 0d239d07fdda..dc9075360bc1 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -86,6 +86,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> spotbugs provided + + org.apache.hadoop + hadoop-hdds-common + test-jar + test + org.apache.hadoop hadoop-hdds-hadoop-dependency-test diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index 444de5fd3ded..b4b91e13857a 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -159,6 +159,13 @@ ${hdds.version} test + + org.apache.hadoop + hadoop-hdds-common + ${hdds.version} + test-jar + test + org.apache.hadoop hadoop-ozone-integration-test diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 7338dad76b8b..b67e004efc07 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -262,6 +262,12 @@ org.glassfish.jersey.inject jersey-hk2 + + org.apache.hadoop + hadoop-hdds-common + test-jar + test + junit junit diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 0fbc7f1a477c..f0e6d85832e5 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -75,7 +75,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.hadoop - hadoop-hdfs + hadoop-hdds-hadoop-dependency-server org.apache.ratis diff --git a/pom.xml b/pom.xml index 07ec99d586ca..05c34d55bd41 100644 --- a/pom.xml +++ b/pom.xml @@ -1019,12 +1019,6 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ratis.version} - - io.netty - netty - 3.10.5.Final - - io.netty netty-all From 43fdd712583f7bc49212f86d7a760608ce52eedb Mon Sep 17 00:00:00 2001 From: Aryan Gupta <44232823+aryangupta1998@users.noreply.github.com> Date: Thu, 26 Nov 2020 14:45:37 +0530 Subject: [PATCH 42/51] HDDS-4370. Datanode deletion service can avoid storing deleted blocks. (#1620) --- .../background/BlockDeletingService.java | 19 +- .../common/TestBlockDeletingService.java | 326 ++++++++++-------- .../TestSchemaOneBackwardsCompatibility.java | 83 +++-- 3 files changed, 236 insertions(+), 192 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java index a373c21e89a0..b03b7d7ad657 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java @@ -29,7 +29,6 @@ import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -41,9 +40,7 @@ import org.apache.hadoop.hdds.utils.MetadataKeyFilters; import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter; import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -293,29 +290,15 @@ public BackgroundTaskResult call() throws Exception { } } - // Once files are deleted... replace deleting entries with deleted - // entries + // Once blocks are deleted... remove the blockID from blockDataTable. try(BatchOperation batch = meta.getStore().getBatchHandler() .initBatchOperation()) { - Table< String, ChunkInfoList > deletedBlocksTable = - meta.getStore().getDeletedBlocksTable(); for (String entry : succeedBlocks) { - List< ContainerProtos.ChunkInfo > chunkList = - blockDataTable.get(entry).getChunks(); - String blockId = entry.substring( - OzoneConsts.DELETING_KEY_PREFIX.length()); - - deletedBlocksTable.putWithBatch( - batch, blockId, - new ChunkInfoList(chunkList)); blockDataTable.deleteWithBatch(batch, entry); } - int deleteBlockCount = succeedBlocks.size(); containerData.updateAndCommitDBCounters(meta, batch, deleteBlockCount); - - // update count of pending deletion blocks and block count in // in-memory container status. containerData.decrPendingDeletionBlocks(deleteBlockCount); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java index 2fb577c79f98..2eb6a394e060 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java @@ -20,8 +20,7 @@ import java.io.File; import java.io.IOException; -import java.time.Duration; -import java.util.Iterator; +import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.UUID; @@ -38,37 +37,40 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.utils.BackgroundService; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; -import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.Checksum; +import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.BlockData; -import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; +import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; +import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.interfaces.Handler; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils; +import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy; +import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy; +import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService; import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer; import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.GenericTestUtils.LogCapturer; +import static org.apache.commons.lang3.RandomStringUtils.randomAlphanumeric; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -76,12 +78,15 @@ import org.junit.runner.RunWith; import org.junit.runners.Parameterized; - +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static java.nio.charset.StandardCharsets.UTF_8; /** * Tests to test block deleting service. @@ -92,9 +97,12 @@ public class TestBlockDeletingService { private static File testRoot; private static String scmId; private static String clusterID; - private Handler handler; + private static String datanodeUuid; + private static MutableConfigurationSource conf; private final ChunkLayOutVersion layout; + private int blockLimitPerTask; + private static VolumeSet volumeSet; public TestBlockDeletingService(ChunkLayOutVersion layout) { this.layout = layout; @@ -114,6 +122,10 @@ public static void init() throws IOException { } scmId = UUID.randomUUID().toString(); clusterID = UUID.randomUUID().toString(); + conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); + datanodeUuid = UUID.randomUUID().toString(); + volumeSet = new MutableVolumeSet(datanodeUuid, conf); } @AfterClass @@ -121,31 +133,45 @@ public static void cleanup() throws IOException { FileUtils.deleteDirectory(testRoot); } + private static final DispatcherContext WRITE_STAGE = + new DispatcherContext.Builder() + .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build(); + + private static final DispatcherContext COMMIT_STAGE = + new DispatcherContext.Builder() + .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build(); + /** * A helper method to create some blocks and put them under deletion * state for testing. This method directly updates container.db and * creates some fake chunk files for testing. */ private void createToDeleteBlocks(ContainerSet containerSet, - MutableConfigurationSource conf, int numOfContainers, + int numOfContainers, int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException { + ChunkManager chunkManager; + if (layout == FILE_PER_BLOCK) { + chunkManager = new FilePerBlockStrategy(true); + } else { + chunkManager = new FilePerChunkStrategy(true, null); + } + byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8); + ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr)); for (int x = 0; x < numOfContainers; x++) { - conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath()); long containerID = ContainerTestHelper.getTestContainerID(); - KeyValueContainerData data = new KeyValueContainerData(containerID, - layout, - ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), - UUID.randomUUID().toString()); + KeyValueContainerData data = + new KeyValueContainerData(containerID, layout, + ContainerTestHelper.CONTAINER_MAX_SIZE, + UUID.randomUUID().toString(), datanodeUuid); data.closeContainer(); KeyValueContainer container = new KeyValueContainer(data, conf); - container.create(new MutableVolumeSet(scmId, clusterID, conf), + container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId); containerSet.addContainer(container); data = (KeyValueContainerData) containerSet.getContainer( containerID).getContainerData(); - - long blockLength = 100; + long chunkLength = 100; try(ReferenceCountedDB metadata = BlockUtils.getDB(data, conf)) { for (int j = 0; j < numOfBlocksPerContainer; j++) { BlockID blockID = @@ -155,30 +181,35 @@ private void createToDeleteBlocks(ContainerSet containerSet, BlockData kd = new BlockData(blockID); List chunks = Lists.newArrayList(); for (int k = 0; k < numOfChunksPerBlock; k++) { + final String chunkName = String.format("block.%d.chunk.%d", j, k); + final long offset = k * chunkLength; ContainerProtos.ChunkInfo info = ContainerProtos.ChunkInfo.newBuilder() - .setChunkName(blockID.getLocalID() + "_chunk_" + k) - .setLen(blockLength) - .setOffset(0) + .setChunkName(chunkName) + .setLen(chunkLength) + .setOffset(offset) .setChecksumData(Checksum.getNoChecksumDataProto()) .build(); chunks.add(info); + ChunkInfo chunkInfo = new ChunkInfo(chunkName, offset, chunkLength); + ChunkBuffer chunkData = buffer.duplicate(0, (int) chunkLength); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, + WRITE_STAGE); + chunkManager.writeChunk(container, blockID, chunkInfo, chunkData, + COMMIT_STAGE); } kd.setChunks(chunks); metadata.getStore().getBlockDataTable().put( deleteStateName, kd); container.getContainerData().incrPendingDeletionBlocks(1); } - container.getContainerData().setKeyCount(numOfBlocksPerContainer); - container.getContainerData().setBytesUsed( - blockLength * numOfBlocksPerContainer); // Set block count, bytes used and pending delete block count. metadata.getStore().getMetadataTable().put( OzoneConsts.BLOCK_COUNT, (long)numOfBlocksPerContainer); metadata.getStore().getMetadataTable().put( OzoneConsts.CONTAINER_BYTES_USED, - blockLength * numOfBlocksPerContainer); + chunkLength * numOfChunksPerBlock * numOfBlocksPerContainer); metadata.getStore().getMetadataTable().put( OzoneConsts.PENDING_DELETE_BLOCK_COUNT, (long)numOfBlocksPerContainer); @@ -207,21 +238,23 @@ private int getUnderDeletionBlocksCount(ReferenceCountedDB meta) MetadataKeyFilters.getDeletingKeyFilter()).size(); } - private int getDeletedBlocksCount(ReferenceCountedDB db) throws IOException { - return db.getStore().getDeletedBlocksTable() - .getRangeKVs(null, 100).size(); - } @Test public void testBlockDeletion() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); + this.blockLimitPerTask = + conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, + OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT); ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 1, 3, 1); - + createToDeleteBlocks(containerSet, 1, 3, 1); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); BlockDeletingServiceTestImpl svc = - getBlockDeletingService(containerSet, conf); + getBlockDeletingService(containerSet, conf, keyValueHandler); svc.start(); GenericTestUtils.waitFor(svc::isStarted, 100, 3000); @@ -240,40 +273,43 @@ public void testBlockDeletion() throws Exception { .get(containerData.get(0).getContainerID()).getContainerData()) .getDeleteTransactionId(); - + long containerSpace = containerData.get(0).getBytesUsed(); // Number of deleted blocks in container should be equal to 0 before // block delete + Assert.assertEquals(0, transactionId); // Ensure there are 3 blocks under deletion and 0 deleted blocks Assert.assertEquals(3, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, - meta.getStore().getMetadataTable() - .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue()); - Assert.assertEquals(0, getDeletedBlocksCount(meta)); + Assert.assertEquals(3, meta.getStore().getMetadataTable() + .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue()); + + // Container contains 3 blocks. So, space used by the container + // should be greater than zero. + Assert.assertTrue(containerSpace > 0); // An interval will delete 1 * 2 blocks deleteAndWait(svc, 1); - Assert.assertEquals(1, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(2, getDeletedBlocksCount(meta)); - deleteAndWait(svc, 2); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); + // After first interval 2 blocks will be deleted. Hence, current space + // used by the container should be less than the space used by the + // container initially(before running deletion services). + Assert.assertTrue(containerData.get(0).getBytesUsed() < containerSpace); - deleteAndWait(svc, 3); - Assert.assertEquals(0, getUnderDeletionBlocksCount(meta)); - Assert.assertEquals(3, getDeletedBlocksCount(meta)); + deleteAndWait(svc, 2); + // After deletion of all 3 blocks, space used by the containers + // should be zero. + containerSpace = containerData.get(0).getBytesUsed(); + Assert.assertTrue(containerSpace == 0); // Check finally DB counters. // Not checking bytes used, as handler is a mock call. + Assert.assertEquals(0, meta.getStore().getMetadataTable() + .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue()); Assert.assertEquals(0, - meta.getStore().getMetadataTable() - .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue()); - Assert.assertEquals(0, - meta.getStore().getMetadataTable() - .get(OzoneConsts.BLOCK_COUNT).longValue()); + meta.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COUNT) + .longValue()); } svc.shutdown(); @@ -282,19 +318,20 @@ public void testBlockDeletion() throws Exception { @Test @SuppressWarnings("java:S2699") // waitFor => assertion with timeout public void testShutdownService() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - DatanodeConfiguration datanodeConfiguration = conf.getObject( - DatanodeConfiguration.class); - datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(500)); - conf.setFromObject(datanodeConfiguration); + conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, + TimeUnit.MILLISECONDS); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10); ContainerSet containerSet = new ContainerSet(); // Create 1 container with 100 blocks - createToDeleteBlocks(containerSet, conf, 1, 100, 1); - + createToDeleteBlocks(containerSet, 1, 100, 1); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf); + getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); GenericTestUtils.waitFor(service::isStarted, 100, 3000); @@ -309,15 +346,19 @@ public void testShutdownService() throws Exception { @Test public void testBlockDeletionTimeout() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 1, 3, 1); - + createToDeleteBlocks(containerSet, 1, 3, 1); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); // set timeout value as 1ns to trigger timeout behavior long timeout = 1; - OzoneContainer ozoneContainer = mockDependencies(containerSet); + OzoneContainer ozoneContainer = + mockDependencies(containerSet, keyValueHandler); BlockDeletingService svc = new BlockDeletingService(ozoneContainer, TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.NANOSECONDS, conf); @@ -338,7 +379,7 @@ public void testBlockDeletionTimeout() throws Exception { // test for normal case that doesn't have timeout limitation timeout = 0; - createToDeleteBlocks(containerSet, conf, 1, 3, 1); + createToDeleteBlocks(containerSet, 1, 3, 1); svc = new BlockDeletingService(ozoneContainer, TimeUnit.MILLISECONDS.toNanos(1000), timeout, TimeUnit.MILLISECONDS, conf); @@ -369,19 +410,21 @@ public void testBlockDeletionTimeout() throws Exception { } private BlockDeletingServiceTestImpl getBlockDeletingService( - ContainerSet containerSet, ConfigurationSource conf) { - OzoneContainer ozoneContainer = mockDependencies(containerSet); - return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf); + ContainerSet containerSet, ConfigurationSource config, + KeyValueHandler keyValueHandler) { + OzoneContainer ozoneContainer = + mockDependencies(containerSet, keyValueHandler); + return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, config); } - private OzoneContainer mockDependencies(ContainerSet containerSet) { + private OzoneContainer mockDependencies(ContainerSet containerSet, + KeyValueHandler keyValueHandler) { OzoneContainer ozoneContainer = mock(OzoneContainer.class); when(ozoneContainer.getContainerSet()).thenReturn(containerSet); when(ozoneContainer.getWriteChannel()).thenReturn(null); ContainerDispatcher dispatcher = mock(ContainerDispatcher.class); when(ozoneContainer.getDispatcher()).thenReturn(dispatcher); - handler = mock(KeyValueHandler.class); - when(dispatcher.getHandler(any())).thenReturn(handler); + when(dispatcher.getHandler(any())).thenReturn(keyValueHandler); return ozoneContainer; } @@ -396,7 +439,6 @@ public void testContainerThrottle() throws Exception { // // Each time only 1 container can be processed, so each time // 1 block from 1 container can be deleted. - OzoneConfiguration conf = new OzoneConfiguration(); // Process 1 container per interval conf.set( ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, @@ -404,28 +446,54 @@ public void testContainerThrottle() throws Exception { conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 1); conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 1); ContainerSet containerSet = new ContainerSet(); + int containerCount = 2; int chunksPerBlock = 10; int blocksPerContainer = 1; - createToDeleteBlocks(containerSet, conf, containerCount, blocksPerContainer, + createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, chunksPerBlock); - + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf); + getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); - + List containerData = Lists.newArrayList(); + containerSet.listContainer(0L, containerCount, containerData); try { GenericTestUtils.waitFor(service::isStarted, 100, 3000); - for (int i = 1; i <= containerCount; i++) { - deleteAndWait(service, i); - verify(handler, times(i * blocksPerContainer)) - .deleteBlock(any(), any()); - } + + // Deleting one of the two containers and its single block. + // Hence, space used by the container of whose block has been + // deleted should be zero. + deleteAndWait(service, 1); + Assert.assertTrue((containerData.get(0).getBytesUsed() == 0) + || containerData.get(1).getBytesUsed() == 0); + + Assert.assertFalse((containerData.get(0).getBytesUsed() == 0) && ( + containerData.get(1).getBytesUsed() == 0)); + + // Deleting the second container. Hence, space used by both the + // containers should be zero. + deleteAndWait(service, 2); + + Assert.assertTrue((containerData.get(1).getBytesUsed() == 0) && ( + containerData.get(1).getBytesUsed() == 0)); } finally { service.shutdown(); } } + public long currentBlockSpace(List containerData, + int totalContainers) { + long totalSpaceUsed = 0; + for (int i = 0; i < totalContainers; i++) { + totalSpaceUsed += containerData.get(i).getBytesUsed(); + } + return totalSpaceUsed; + } @Test(timeout = 30000) public void testBlockThrottle() throws Exception { @@ -439,92 +507,54 @@ public void testBlockThrottle() throws Exception { // Each time containers can be all scanned, but only 2 blocks // per container can be actually deleted. So it requires 2 waves // to cleanup all blocks. - OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - int blockLimitPerTask = 2; + blockLimitPerTask = 2; conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, blockLimitPerTask); ContainerSet containerSet = new ContainerSet(); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); int containerCount = 5; int blocksPerContainer = 3; - createToDeleteBlocks(containerSet, conf, containerCount, + createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, 1); BlockDeletingServiceTestImpl service = - getBlockDeletingService(containerSet, conf); + getBlockDeletingService(containerSet, conf, keyValueHandler); service.start(); - + List containerData = Lists.newArrayList(); + containerSet.listContainer(0L, containerCount, containerData); + long blockSpace = containerData.get(0).getBytesUsed() / blocksPerContainer; + long totalContainerSpace = + containerCount * containerData.get(0).getBytesUsed(); try { GenericTestUtils.waitFor(service::isStarted, 100, 3000); // Total blocks = 3 * 5 = 15 // block per task = 2 // number of containers = 5 // each interval will at most runDeletingTasks 5 * 2 = 10 blocks + + // Deleted space of 10 blocks should be equal to (initial total space + // of container - current total space of container). deleteAndWait(service, 1); - verify(handler, times(blockLimitPerTask * containerCount)) - .deleteBlock(any(), any()); + Assert.assertEquals(blockLimitPerTask * containerCount * blockSpace, + (totalContainerSpace - currentBlockSpace(containerData, + containerCount))); // There is only 5 blocks left to runDeletingTasks + + // (Deleted space of previous 10 blocks + these left 5 blocks) should + // be equal to (initial total space of container + // - current total space of container(it will be zero as all blocks + // in all the containers are deleted)). deleteAndWait(service, 2); - verify(handler, times( - blocksPerContainer * containerCount)) - .deleteBlock(any(), any()); + Assert.assertEquals(blocksPerContainer * containerCount * blockSpace, + (totalContainerSpace - currentBlockSpace(containerData, + containerCount))); } finally { service.shutdown(); } } - - @Test - public void testDeletedChunkInfo() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); - conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); - ContainerSet containerSet = new ContainerSet(); - createToDeleteBlocks(containerSet, conf, 1, 2, 3); - - List containerData = Lists.newArrayList(); - containerSet.listContainer(0L, 1, containerData); - - try(ReferenceCountedDB meta = BlockUtils.getDB( - (KeyValueContainerData) containerData.get(0), conf)) { - - // Collect all ChunkInfo from blocks marked for deletion. - List> deletingBlocks = - meta.getStore().getBlockDataTable() - .getRangeKVs(null, 100, - MetadataKeyFilters.getDeletingKeyFilter()); - - // Delete all blocks marked for deletion. - BlockDeletingServiceTestImpl svc = - getBlockDeletingService(containerSet, conf); - svc.start(); - GenericTestUtils.waitFor(svc::isStarted, 100, 3000); - deleteAndWait(svc, 1); - svc.shutdown(); - - // Get deleted blocks from their table, and check their ChunkInfo lists - // against those we saved for them before deletion. - List> deletedBlocks = - meta.getStore().getDeletedBlocksTable() - .getRangeKVs(null, 100); - - Assert.assertEquals(deletingBlocks.size(), deletedBlocks.size()); - - Iterator> - deletingBlocksIter = deletingBlocks.iterator(); - Iterator> - deletedBlocksIter = deletedBlocks.iterator(); - - while(deletingBlocksIter.hasNext() && deletedBlocksIter.hasNext()) { - List deletingChunks = - deletingBlocksIter.next().getValue().getChunks(); - List deletedChunks = - deletedBlocksIter.next().getValue().asList(); - - // On each element of each list, this call uses the equals method - // for ChunkInfos generated by protobuf. - // This checks their internal fields for equality. - Assert.assertEquals(deletingChunks, deletedChunks); - } - } - } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java index 01fa3bf372c4..00ebcb011207 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java @@ -27,11 +27,14 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList; +import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB; +import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; +import org.apache.hadoop.ozone.container.common.volume.VolumeSet; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; @@ -52,11 +55,19 @@ import java.io.File; import java.io.IOException; import java.net.URL; -import java.util.*; +import java.util.List; +import java.util.UUID; +import java.util.ArrayList; +import java.util.Set; +import java.util.HashSet; +import java.util.Arrays; import java.util.stream.Collectors; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL; -import static org.junit.Assert.*; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -225,8 +236,22 @@ public void testReadWithoutMetadata() throws Exception { @Test public void testDelete() throws Exception { final long numBlocksToDelete = TestDB.NUM_PENDING_DELETION_BLOCKS; + String datanodeUuid = UUID.randomUUID().toString(); + ContainerSet containerSet = makeContainerSet(); + VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); + long initialTotalSpace = newKvData().getBytesUsed(); + long blockSpace = initialTotalSpace / TestDB.KEY_COUNT; + + runBlockDeletingService(keyValueHandler); - runBlockDeletingService(); + long currentTotalSpace = newKvData().getBytesUsed(); + long numberOfBlocksDeleted = + (initialTotalSpace - currentTotalSpace) / blockSpace; // Expected values after blocks with #deleting# prefix in original DB are // deleted. @@ -242,7 +267,7 @@ public void testDelete() throws Exception { assertEquals(expectedDeletingBlocks, countDeletingBlocks(refCountedDB)); assertEquals(expectedDeletedBlocks, - countDeletedBlocks(refCountedDB)); + TestDB.NUM_DELETED_BLOCKS + numberOfBlocksDeleted); assertEquals(expectedRegularBlocks, countUnprefixedBlocks(refCountedDB)); @@ -269,6 +294,14 @@ public void testDelete() throws Exception { */ @Test public void testReadDeletedBlockChunkInfo() throws Exception { + String datanodeUuid = UUID.randomUUID().toString(); + ContainerSet containerSet = makeContainerSet(); + VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf); + ContainerMetrics metrics = ContainerMetrics.create(conf); + KeyValueHandler keyValueHandler = + new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, + metrics, c -> { + }); try(ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) { // Read blocks that were already deleted before the upgrade. List> deletedBlocks = @@ -290,25 +323,22 @@ public void testReadDeletedBlockChunkInfo() throws Exception { Assert.assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size()); - runBlockDeletingService(); + long initialTotalSpace = newKvData().getBytesUsed(); + long blockSpace = initialTotalSpace / TestDB.KEY_COUNT; - // After the block deleting service runs, get the updated list of - // deleted blocks. - deletedBlocks = refCountedDB.getStore() - .getDeletedBlocksTable().getRangeKVs(null, 100); + runBlockDeletingService(keyValueHandler); - int numPostUpgradeDeletesFound = 0; - for(Table.KeyValue chunkListKV: deletedBlocks) { - if (!preUpgradeBlocks.contains(chunkListKV.getKey())) { - numPostUpgradeDeletesFound++; - Assert.assertNotNull(chunkListKV.getValue()); - } - } + long currentTotalSpace = newKvData().getBytesUsed(); + + // After the block deleting service runs, get the number of + // deleted blocks. + long numberOfBlocksDeleted = + (initialTotalSpace - currentTotalSpace) / blockSpace; // The blocks that were originally marked for deletion should now be // deleted. Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, - numPostUpgradeDeletesFound); + numberOfBlocksDeleted); } } @@ -448,21 +478,22 @@ public void testReadDeletedBlocks() throws Exception { } } - private void runBlockDeletingService() throws Exception { + private void runBlockDeletingService(KeyValueHandler keyValueHandler) + throws Exception { conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10); conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2); conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, - metadataDir.getAbsolutePath()); + metadataDir.getAbsolutePath()); - OzoneContainer container = makeMockOzoneContainer(); + OzoneContainer container = makeMockOzoneContainer(keyValueHandler); BlockDeletingServiceTestImpl service = - new BlockDeletingServiceTestImpl(container, 1000, conf); + new BlockDeletingServiceTestImpl(container, 1000, conf); service.start(); GenericTestUtils.waitFor(service::isStarted, 100, 3000); service.runDeletingTasks(); - GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == 1, - 100, 3000); + GenericTestUtils + .waitFor(() -> service.getTimesOfProcessed() == 1, 100, 3000); } private ContainerSet makeContainerSet() throws Exception { @@ -473,7 +504,8 @@ private ContainerSet makeContainerSet() throws Exception { return containerSet; } - private OzoneContainer makeMockOzoneContainer() throws Exception { + private OzoneContainer makeMockOzoneContainer(KeyValueHandler keyValueHandler) + throws Exception { ContainerSet containerSet = makeContainerSet(); OzoneContainer ozoneContainer = mock(OzoneContainer.class); @@ -481,8 +513,7 @@ private OzoneContainer makeMockOzoneContainer() throws Exception { when(ozoneContainer.getWriteChannel()).thenReturn(null); ContainerDispatcher dispatcher = mock(ContainerDispatcher.class); when(ozoneContainer.getDispatcher()).thenReturn(dispatcher); - KeyValueHandler handler = mock(KeyValueHandler.class); - when(dispatcher.getHandler(any())).thenReturn(handler); + when(dispatcher.getHandler(any())).thenReturn(keyValueHandler); return ozoneContainer; } From 570434164ef4fbe51db83d52421d2f83eed632c1 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 26 Nov 2020 10:46:25 +0100 Subject: [PATCH 43/51] HDDS-3363. Intermittent failure in testContainerImportExport (#1618) --- .../hadoop/ozone/container/keyvalue/KeyValueContainer.java | 3 +++ .../hadoop/ozone/container/keyvalue/TestKeyValueContainer.java | 1 - 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java index 1fff7494e87c..a239b5fbd8a5 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java @@ -528,6 +528,9 @@ public void exportContainerData(OutputStream destination, + getContainerData().getContainerID() + " is in state " + state); } compactDB(); + // Close DB (and remove from cache) to avoid concurrent modification while + // packing it. + BlockUtils.removeDB(containerData, config); packer.pack(this, destination); } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java index c2b487be2933..25d8b1d25edf 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java @@ -199,7 +199,6 @@ public void testContainerImportExport() throws Exception { metadataStore.getStore().getMetadataTable() .put(OzoneConsts.BLOCK_COUNT, numberOfKeysToWrite); } - BlockUtils.removeDB(keyValueContainerData, CONF); Map metadata = new HashMap<>(); metadata.put("key1", "value1"); From 143f0767d17f9e867d0f533dcbdd8aa87c2d3602 Mon Sep 17 00:00:00 2001 From: Aryan Gupta <44232823+aryangupta1998@users.noreply.github.com> Date: Thu, 26 Nov 2020 22:12:27 +0530 Subject: [PATCH 44/51] HDDS-4510. SCM can avoid creating RetriableDatanodeEventWatcher for deletion command ACK (#1626) --- .../hadoop/hdds/scm/server/StorageContainerManager.java | 9 --------- 1 file changed, 9 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 3cf12e75d235..4872840a5c40 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -105,7 +105,6 @@ import org.apache.hadoop.ozone.common.Storage.StorageState; import org.apache.hadoop.ozone.lease.LeaseManager; import org.apache.hadoop.ozone.lock.LockManager; -import org.apache.hadoop.ozone.protocol.commands.RetriableDatanodeEventWatcher; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -314,14 +313,6 @@ public StorageContainerManager(OzoneConfiguration conf, PipelineActionHandler pipelineActionHandler = new PipelineActionHandler(pipelineManager, conf); - - RetriableDatanodeEventWatcher retriableDatanodeEventWatcher = - new RetriableDatanodeEventWatcher<>( - SCMEvents.RETRIABLE_DATANODE_COMMAND, - SCMEvents.DELETE_BLOCK_STATUS, - commandWatcherLeaseManager); - retriableDatanodeEventWatcher.start(eventQueue); - scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys .OZONE_ADMINISTRATORS); String scmUsername = UserGroupInformation.getCurrentUser().getUserName(); From 130ba4df34a02ecc5e58741cb1af8945ec607e82 Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Fri, 27 Nov 2020 20:31:24 +0800 Subject: [PATCH 45/51] HDDS-4511: Avoiding StaleNodeHandler to take effect in TestDeleteWithSlowFollower. (#1625) --- .../hadoop/hdds/scm/container/ReplicationManager.java | 4 ++-- .../ozone/client/rpc/TestDeleteWithSlowFollower.java | 7 +++++++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java index bab885173627..ed6924ca8b03 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java @@ -441,8 +441,8 @@ private boolean isContainerEmpty(final ContainerInfo container, */ private boolean isContainerUnderReplicated(final ContainerInfo container, final Set replicas) { - if (container.getState() != LifeCycleState.CLOSED && - container.getState() != LifeCycleState.QUASI_CLOSED) { + if (container.getState() == LifeCycleState.DELETING || + container.getState() == LifeCycleState.DELETED) { return false; } boolean misReplicated = !getPlacementStatus( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java index e5cc628b221d..12c6d62f0bba 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithSlowFollower.java @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.interfaces.Container; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; import org.apache.hadoop.ozone.container.common.transport.server.ratis.ContainerStateMachine; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -130,10 +131,16 @@ public static void init() throws Exception { conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 1, TimeUnit.SECONDS); + ScmConfig scmConfig = conf.getObject(ScmConfig.class); scmConfig.setBlockDeletionInterval(Duration.ofSeconds(1)); conf.setFromObject(scmConfig); + DatanodeConfiguration datanodeConfiguration = conf.getObject( + DatanodeConfiguration.class); + datanodeConfiguration.setBlockDeletionInterval(Duration.ofMillis(100)); + conf.setFromObject(datanodeConfiguration); + RatisClientConfig ratisClientConfig = conf.getObject(RatisClientConfig.class); ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); From 285d7933b4bedb3d2c28f7b9ad7cf56a3759e7d7 Mon Sep 17 00:00:00 2001 From: Li Cheng Date: Tue, 1 Dec 2020 19:35:25 +0800 Subject: [PATCH 46/51] HDDS-4191 Add failover proxy for SCM container location. (#1514) --- ...ocationProtocolClientSideTranslatorPB.java | 21 +- ...SCMBlockLocationFailoverProxyProvider.java | 2 +- ...ontainerLocationFailoverProxyProvider.java | 284 ++++++++++++++++++ .../src/main/proto/ScmAdminProtocol.proto | 2 + ...ocationProtocolServerSideTranslatorPB.java | 14 + .../scm/cli/ContainerOperationClient.java | 34 +-- .../hadoop/ozone/MiniOzoneClusterImpl.java | 19 +- .../apache/hadoop/ozone/om/OzoneManager.java | 21 +- .../ozone/recon/ReconControllerModule.java | 6 +- .../ozone/freon/BaseFreonGenerator.java | 25 +- 10 files changed, 342 insertions(+), 86 deletions(-) create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java index cf888697313b..e5ee1234e335 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java @@ -68,7 +68,9 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; +import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.io.retry.RetryProxy; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; @@ -92,15 +94,20 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB private static final RpcController NULL_RPC_CONTROLLER = null; private final StorageContainerLocationProtocolPB rpcProxy; + private final SCMContainerLocationFailoverProxyProvider failoverProxyProvider; /** * Creates a new StorageContainerLocationProtocolClientSideTranslatorPB. * - * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy + * @param proxyProvider {@link SCMContainerLocationFailoverProxyProvider} */ public StorageContainerLocationProtocolClientSideTranslatorPB( - StorageContainerLocationProtocolPB rpcProxy) { - this.rpcProxy = rpcProxy; + SCMContainerLocationFailoverProxyProvider proxyProvider) { + Preconditions.checkNotNull(proxyProvider); + this.failoverProxyProvider = proxyProvider; + this.rpcProxy = (StorageContainerLocationProtocolPB) RetryProxy.create( + StorageContainerLocationProtocolPB.class, failoverProxyProvider, + failoverProxyProvider.getSCMContainerLocationRetryPolicy(null)); } /** @@ -127,7 +134,13 @@ private ScmContainerLocationResponse submitRequest( private ScmContainerLocationResponse submitRpcRequest( ScmContainerLocationRequest wrapper) throws ServiceException { - return rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); + ScmContainerLocationResponse response = + rpcProxy.submitRequest(NULL_RPC_CONTROLLER, wrapper); + if (response.getStatus() == + ScmContainerLocationResponse.Status.SCM_NOT_LEADER) { + failoverProxyProvider.performFailoverToAssignedLeader(null); + } + return response; } /** diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java index a9ff4c1ea775..bcc1a01c13c4 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java @@ -51,7 +51,7 @@ import static org.apache.hadoop.hdds.HddsUtils.getHostName; /** - * Failover proxy provider for SCM. + * Failover proxy provider for SCM block location. */ public class SCMBlockLocationFailoverProxyProvider implements FailoverProxyProvider, Closeable { diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java new file mode 100644 index 000000000000..a04a66f4f278 --- /dev/null +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMContainerLocationFailoverProxyProvider.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hdds.scm.proxy; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; +import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.RetryPolicy; +import org.apache.hadoop.ipc.ProtobufRpcEngine; +import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.UserGroupInformation; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.Closeable; +import java.io.IOException; +import java.net.InetSocketAddress; +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +import static org.apache.hadoop.hdds.HddsUtils.getHostName; +import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys; +import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY; + +/** + * Failover proxy provider for SCM container location. + */ +public class SCMContainerLocationFailoverProxyProvider implements + FailoverProxyProvider, Closeable { + public static final Logger LOG = + LoggerFactory.getLogger(SCMContainerLocationFailoverProxyProvider.class); + + private Map> scmProxies; + private Map scmProxyInfoMap; + private List scmNodeIDList; + + private String currentProxySCMNodeId; + private int currentProxyIndex; + + private final ConfigurationSource conf; + private final SCMClientConfig scmClientConfig; + private final long scmVersion; + + private final String scmServiceId; + + private final int maxRetryCount; + private final long retryInterval; + + public static final String SCM_DUMMY_NODEID_PREFIX = "scm"; + + public SCMContainerLocationFailoverProxyProvider(ConfigurationSource conf) { + this.conf = conf; + this.scmVersion = RPC.getProtocolVersion( + StorageContainerLocationProtocolPB.class); + this.scmServiceId = conf.getTrimmed(OZONE_SCM_SERVICE_IDS_KEY); + this.scmProxies = new HashMap<>(); + this.scmProxyInfoMap = new HashMap<>(); + this.scmNodeIDList = new ArrayList<>(); + loadConfigs(); + + this.currentProxyIndex = 0; + currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex); + scmClientConfig = conf.getObject(SCMClientConfig.class); + this.maxRetryCount = scmClientConfig.getRetryCount(); + this.retryInterval = scmClientConfig.getRetryInterval(); + } + + @VisibleForTesting + protected Collection getSCMAddressList() { + Collection scmAddressList = + conf.getTrimmedStringCollection(OZONE_SCM_NAMES); + Collection resultList = new ArrayList<>(); + if (!scmAddressList.isEmpty()) { + final int port = getPortNumberFromConfigKeys(conf, + ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY) + .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_PORT_DEFAULT); + for (String scmAddress : scmAddressList) { + LOG.debug("SCM Address for proxy is {}", scmAddress); + + Optional hostname = getHostName(scmAddress); + if (hostname.isPresent()) { + resultList.add(NetUtils.createSocketAddr( + hostname.get() + ":" + port)); + } + } + } + if (resultList.isEmpty()) { + // fall back + resultList.add(getScmAddressForClients(conf)); + } + return resultList; + } + + private void loadConfigs() { + Collection scmAddressList = getSCMAddressList(); + int scmNodeIndex = 1; + for (InetSocketAddress scmAddress : scmAddressList) { + String nodeId = SCM_DUMMY_NODEID_PREFIX + scmNodeIndex; + if (scmAddress == null) { + LOG.error("Failed to create SCM proxy for {}.", nodeId); + continue; + } + scmNodeIndex++; + SCMProxyInfo scmProxyInfo = new SCMProxyInfo( + scmServiceId, nodeId, scmAddress); + ProxyInfo proxy + = new ProxyInfo<>(null, scmProxyInfo.toString()); + scmProxies.put(nodeId, proxy); + scmProxyInfoMap.put(nodeId, scmProxyInfo); + scmNodeIDList.add(nodeId); + } + + if (scmProxies.isEmpty()) { + throw new IllegalArgumentException("Could not find any configured " + + "addresses for SCM. Please configure the system with " + + OZONE_SCM_NAMES); + } + } + + @VisibleForTesting + public synchronized String getCurrentProxyOMNodeId() { + return currentProxySCMNodeId; + } + + @Override + public synchronized ProxyInfo getProxy() { + ProxyInfo currentProxyInfo = scmProxies.get(currentProxySCMNodeId); + createSCMProxyIfNeeded(currentProxyInfo, currentProxySCMNodeId); + return currentProxyInfo; + } + + @Override + public void performFailover( + StorageContainerLocationProtocolPB newLeader) { + // Should do nothing here. + LOG.debug("Failing over to next proxy. {}", getCurrentProxyOMNodeId()); + } + + public void performFailoverToAssignedLeader(String newLeader) { + if (newLeader == null) { + // If newLeader is not assigned, it will fail over to next proxy. + nextProxyIndex(); + } else { + if (!assignLeaderToNode(newLeader)) { + LOG.debug("Failing over OM proxy to nodeId: {}", newLeader); + nextProxyIndex(); + } + } + } + + @Override + public Class< + StorageContainerLocationProtocolPB> getInterface() { + return StorageContainerLocationProtocolPB.class; + } + + @Override + public synchronized void close() throws IOException { + for (ProxyInfo + proxy : scmProxies.values()) { + StorageContainerLocationProtocolPB scmProxy = + proxy.proxy; + if (scmProxy != null) { + RPC.stopProxy(scmProxy); + } + } + } + + public RetryPolicy.RetryAction getRetryAction(int failovers) { + if (failovers < maxRetryCount) { + return new RetryPolicy.RetryAction( + RetryPolicy.RetryAction.RetryDecision.FAILOVER_AND_RETRY, + getRetryInterval()); + } else { + return RetryPolicy.RetryAction.FAIL; + } + } + + private synchronized long getRetryInterval() { + // TODO add exponential backup + return retryInterval; + } + + private synchronized int nextProxyIndex() { +// lastAttemptedLeader = currentProxySCMNodeId; + + // round robin the next proxy + currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size(); + currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex); + return currentProxyIndex; + } + + synchronized boolean assignLeaderToNode(String newLeaderNodeId) { + if (!currentProxySCMNodeId.equals(newLeaderNodeId)) { + if (scmProxies.containsKey(newLeaderNodeId)) { +// lastAttemptedLeader = currentProxySCMNodeId; + currentProxySCMNodeId = newLeaderNodeId; + currentProxyIndex = scmNodeIDList.indexOf(currentProxySCMNodeId); + return true; + } + } +// } else { +// lastAttemptedLeader = currentProxySCMNodeId; +// } + return false; + } + + /** + * Creates proxy object if it does not already exist. + */ + private void createSCMProxyIfNeeded(ProxyInfo proxyInfo, + String nodeId) { + if (proxyInfo.proxy == null) { + InetSocketAddress address = scmProxyInfoMap.get(nodeId).getAddress(); + try { + StorageContainerLocationProtocolPB proxy = + createSCMProxy(address); + try { + proxyInfo.proxy = proxy; + } catch (IllegalAccessError iae) { + scmProxies.put(nodeId, + new ProxyInfo<>(proxy, proxyInfo.proxyInfo)); + } + } catch (IOException ioe) { + LOG.error("{} Failed to create RPC proxy to SCM at {}", + this.getClass().getSimpleName(), address, ioe); + throw new RuntimeException(ioe); + } + } + } + + private StorageContainerLocationProtocolPB createSCMProxy( + InetSocketAddress scmAddress) throws IOException { + Configuration hadoopConf = + LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + RPC.setProtocolEngine(hadoopConf, StorageContainerLocationProtocolPB.class, + ProtobufRpcEngine.class); + return RPC.getProxy( + StorageContainerLocationProtocolPB.class, + scmVersion, scmAddress, UserGroupInformation.getCurrentUser(), + hadoopConf, NetUtils.getDefaultSocketFactory(hadoopConf), + (int)scmClientConfig.getRpcTimeOut()); + } + + public RetryPolicy getSCMContainerLocationRetryPolicy( + String suggestedLeader) { + RetryPolicy retryPolicy = new RetryPolicy() { + @Override + public RetryAction shouldRetry(Exception e, int retry, + int failover, boolean b) { + performFailoverToAssignedLeader(suggestedLeader); + return getRetryAction(failover); + } + }; + return retryPolicy; + } +} diff --git a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto index 91dbebe33b88..739377551fea 100644 --- a/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto +++ b/hadoop-hdds/interface-admin/src/main/proto/ScmAdminProtocol.proto @@ -100,6 +100,7 @@ message ScmContainerLocationResponse { OK = 1; CONTAINER_ALREADY_EXISTS = 2; CONTAINER_IS_MISSING = 3; + SCM_NOT_LEADER = 4; } } @@ -147,6 +148,7 @@ message ContainerResponseProto { success = 1; errorContainerAlreadyExists = 2; errorContainerMissing = 3; + scmNotLeader = 4; } required Error errorCode = 1; required ContainerWithPipeline containerWithPipeline = 2; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index 24f17f124c44..b2f6534372a1 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -73,6 +73,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer; import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher; import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics; @@ -120,9 +121,22 @@ public StorageContainerLocationProtocolServerSideTranslatorPB( protocolMetrics, LOG); } + private boolean isLeader() throws ServiceException { + if (!(impl instanceof SCMClientProtocolServer)) { + throw new ServiceException("Should be SCMClientProtocolServer"); + } else { + return ((SCMClientProtocolServer) impl).getScm().checkLeader(); + } + } + @Override public ScmContainerLocationResponse submitRequest(RpcController controller, ScmContainerLocationRequest request) throws ServiceException { + if (!isLeader()) { + return ScmContainerLocationResponse.newBuilder() + .setCmdType(request.getCmdType()).setTraceID(request.getTraceID()) + .setSuccess(false).setStatus(Status.SCM_NOT_LEADER).build(); + } return dispatcher .processRequest(request, this::processRequest, request.getCmdType(), request.getTraceID()); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java index 038364227f1a..f67addfcc8ae 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java @@ -17,14 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli; -import javax.net.SocketFactory; import java.io.IOException; -import java.net.InetSocketAddress; import java.util.List; import java.util.Map; import org.apache.commons.lang3.tuple.Pair; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -41,20 +38,13 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.ProtobufRpcEngine; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OzoneSecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; import com.google.common.base.Preconditions; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; @@ -116,25 +106,13 @@ private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf) } public static StorageContainerLocationProtocol newContainerRpcClient( - ConfigurationSource configSource) throws IOException { - - Class protocol = - StorageContainerLocationProtocolPB.class; - Configuration conf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(configSource); - RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class); - long version = RPC.getProtocolVersion(protocol); - InetSocketAddress scmAddress = getScmAddressForClients(configSource); - UserGroupInformation user = UserGroupInformation.getCurrentUser(); - SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(conf); - int rpcTimeOut = Client.getRpcTimeout(conf); - - StorageContainerLocationProtocolPB rpcProxy = - RPC.getProxy(protocol, version, scmAddress, user, conf, - socketFactory, rpcTimeOut); + ConfigurationSource configSource) { + SCMContainerLocationFailoverProxyProvider proxyProvider = + new SCMContainerLocationFailoverProxyProvider(configSource); StorageContainerLocationProtocolClientSideTranslatorPB client = - new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy); + new StorageContainerLocationProtocolClientSideTranslatorPB( + proxyProvider); return TracingUtil.createProxy( client, StorageContainerLocationProtocol.class, configSource); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 099b0697de31..340b902ddc1a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -45,16 +45,13 @@ import org.apache.hadoop.hdds.scm.TestUtils; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.scm.safemode.HealthyPipelineSafeModeRule; import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; -import org.apache.hadoop.ipc.Client; -import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.common.Storage.StorageState; @@ -64,7 +61,6 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.recon.ConfigurationProvider; import org.apache.hadoop.ozone.recon.ReconServer; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.test.GenericTestUtils; @@ -284,18 +280,17 @@ public OzoneClient getRpcClient() throws IOException { */ @Override public StorageContainerLocationProtocolClientSideTranslatorPB - getStorageContainerLocationClient() throws IOException { - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); + getStorageContainerLocationClient() { InetSocketAddress address = scm.getClientRpcAddress(); LOG.info( "Creating StorageContainerLocationProtocol RPC client with address {}", address); + + SCMContainerLocationFailoverProxyProvider proxyProvider = + new SCMContainerLocationFailoverProxyProvider(conf); + return new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - address, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))); + proxyProvider); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 07424b376312..e7f064e00212 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -72,8 +72,8 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.security.x509.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient; @@ -93,12 +93,10 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.RetryPolicy; -import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.util.MBeans; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -848,22 +846,13 @@ private static ScmBlockLocationProtocol getScmBlockClient( * @throws IOException */ private static StorageContainerLocationProtocol getScmContainerClient( - OzoneConfiguration conf) throws IOException { - RPC.setProtocolEngine(conf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); - long scmVersion = - RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddr = getScmAddressForClients( - conf); + OzoneConfiguration conf) { + SCMContainerLocationFailoverProxyProvider proxyProvider = + new SCMContainerLocationFailoverProxyProvider(conf); StorageContainerLocationProtocol scmContainerClient = TracingUtil.createProxy( new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, - scmVersion, - scmAddr, UserGroupInformation.getCurrentUser(), conf, - NetUtils.getDefaultSocketFactory(conf), - Client.getRpcTimeout(conf))), - StorageContainerLocationProtocol.class, conf); + proxyProvider), StorageContainerLocationProtocol.class, conf); return scmContainerClient; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index cb667f43855b..c08ca873323a 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -171,11 +171,7 @@ OzoneManagerProtocol getOzoneManagerProtocol( StorageContainerLocationProtocol getSCMProtocol( final OzoneConfiguration configuration) { StorageContainerLocationProtocol storageContainerLocationProtocol = null; - try { - storageContainerLocationProtocol = newContainerRpcClient(configuration); - } catch (IOException e) { - LOG.error("Error in provisioning StorageContainerLocationProtocol ", e); - } + storageContainerLocationProtocol = newContainerRpcClient(configuration); return storageContainerLocationProtocol; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 1cfff127097c..18f75dee3219 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.io.InputStream; -import java.net.InetSocketAddress; import java.util.LinkedList; import java.util.List; import java.util.concurrent.ExecutorService; @@ -34,12 +33,10 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; -import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; +import org.apache.hadoop.hdds.scm.proxy.SCMContainerLocationFailoverProxyProvider; import org.apache.hadoop.hdds.tracing.TracingUtil; -import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.OzoneVolume; @@ -60,7 +57,6 @@ import io.opentracing.util.GlobalTracer; import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; -import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY; import org.apache.ratis.protocol.ClientId; import org.slf4j.Logger; @@ -337,24 +333,13 @@ public OzoneManagerProtocolClientSideTranslatorPB createOmClient( } public StorageContainerLocationProtocol createStorageContainerLocationClient( - OzoneConfiguration ozoneConf) - throws IOException { - - long version = RPC.getProtocolVersion( - StorageContainerLocationProtocolPB.class); - InetSocketAddress scmAddress = - getScmAddressForClients(ozoneConf); - - RPC.setProtocolEngine(ozoneConf, StorageContainerLocationProtocolPB.class, - ProtobufRpcEngine.class); + OzoneConfiguration ozoneConf) { + SCMContainerLocationFailoverProxyProvider proxyProvider = + new SCMContainerLocationFailoverProxyProvider(ozoneConf); StorageContainerLocationProtocol client = TracingUtil.createProxy( new StorageContainerLocationProtocolClientSideTranslatorPB( - RPC.getProxy(StorageContainerLocationProtocolPB.class, version, - scmAddress, UserGroupInformation.getCurrentUser(), - ozoneConf, - NetUtils.getDefaultSocketFactory(ozoneConf), - Client.getRpcTimeout(ozoneConf))), + proxyProvider), StorageContainerLocationProtocol.class, ozoneConf); return client; } From 48b980904205823259393d5b521e6a3c1d046898 Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Wed, 2 Dec 2020 23:26:20 +0800 Subject: [PATCH 47/51] HDDS-4538: Workaround on HDDS-2823, hard code scmUuid and clusterID. (#1649) --- .../java/org/apache/hadoop/ozone/common/StorageInfo.java | 7 +++++-- .../apache/hadoop/hdds/scm/server/SCMStorageConfig.java | 7 +++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java index 55911fcfd994..c88aaa9b25d5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/StorageInfo.java @@ -27,7 +27,6 @@ import java.io.IOException; import java.io.RandomAccessFile; import java.util.Properties; -import java.util.UUID; /** * Common class for storage information. This class defines the common @@ -198,7 +197,11 @@ private Properties readFrom(File from) throws IOException { * @return new clusterID */ public static String newClusterID() { - return "CID-" + UUID.randomUUID().toString(); + // TODO: + // Please check https://issues.apache.org/jira/browse/HDDS-4538 + // hard code clusterID and scmUuid on HDDS-2823, + // so that multi SCMs won't cause chaos in Datanode side. + return "CID-1df51ed9-19f1-4283-8f61-5d90a84c196c"; } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java index a6282799cf55..fab33b575eeb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMStorageConfig.java @@ -25,7 +25,6 @@ import java.io.File; import java.io.IOException; import java.util.Properties; -import java.util.UUID; import static org.apache.hadoop.ozone.OzoneConsts.SCM_ID; import static org.apache.hadoop.ozone.OzoneConsts.STORAGE_DIR; @@ -69,7 +68,11 @@ public String getScmId() { protected Properties getNodeProperties() { String scmId = getScmId(); if (scmId == null) { - scmId = UUID.randomUUID().toString(); + // TODO: + // Please check https://issues.apache.org/jira/browse/HDDS-4538 + // hard code clusterID and scmUuid on HDDS-2823, + // so that multi SCMs won't cause chaos in Datanode side. + scmId = "3a11fedb-cce5-46ac-bb0d-cfdf17df9a19"; } Properties scmProperties = new Properties(); scmProperties.setProperty(SCM_ID, scmId); From 08012034695f4cf3e598859045b7de62a35c9324 Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Fri, 4 Dec 2020 15:31:08 +0800 Subject: [PATCH 48/51] HDDS-4542. Need throw exception to trigger FailoverProxyProvider of SCM client to work (#1652) --- .../ScmBlockLocationProtocolServerSideTranslatorPB.java | 8 +------- ...geContainerLocationProtocolServerSideTranslatorPB.java | 4 +--- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java index cbb64c195a22..ea6a148a95c2 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -107,13 +107,7 @@ private boolean isLeader() throws ServiceException { public SCMBlockLocationResponse send(RpcController controller, SCMBlockLocationRequest request) throws ServiceException { if (!isLeader()) { - SCMBlockLocationResponse.Builder response = createSCMBlockResponse( - request.getCmdType(), - request.getTraceID()); - response.setSuccess(false); - response.setStatus(Status.SCM_NOT_LEADER); - response.setLeaderSCMNodeId(null); - return response.build(); + throw new ServiceException(new IOException("SCM IS NOT LEADER")); } return dispatcher.processRequest( request, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java index b2f6534372a1..aa19cec60274 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -133,9 +133,7 @@ private boolean isLeader() throws ServiceException { public ScmContainerLocationResponse submitRequest(RpcController controller, ScmContainerLocationRequest request) throws ServiceException { if (!isLeader()) { - return ScmContainerLocationResponse.newBuilder() - .setCmdType(request.getCmdType()).setTraceID(request.getTraceID()) - .setSuccess(false).setStatus(Status.SCM_NOT_LEADER).build(); + throw new ServiceException(new IOException("SCM IS NOT LEADER")); } return dispatcher .processRequest(request, this::processRequest, request.getCmdType(), From 0aa9ba3e354193855e43b488d325cd64137c5648 Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Mon, 7 Dec 2020 13:10:39 +0800 Subject: [PATCH 49/51] HDDS-3988: DN can distinguish SCMCommand from stale leader SCM (#1314) * HDDS-3988: DN can distinguish SCMCommand from stale leader SCM. * HDDS-3988: fix comments --- .../common/statemachine/StateContext.java | 94 ++++++++++++++++++- .../endpoint/HeartbeatEndpointTask.java | 18 ++++ .../container/ozoneimpl/OzoneContainer.java | 8 ++ .../ozone/protocol/commands/SCMCommand.java | 22 ++++- .../ScmServerDatanodeHeartbeatProtocol.proto | 6 ++ .../hadoop/hdds/scm/ha/SCMHAManager.java | 9 +- .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 26 ++--- .../hadoop/hdds/scm/node/SCMNodeManager.java | 29 +++++- .../scm/pipeline/PipelineManagerV2Impl.java | 11 ++- .../scm/server/SCMDatanodeProtocolServer.java | 5 + .../scm/server/StorageContainerManager.java | 4 +- .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 5 +- pom.xml | 2 +- 13 files changed, 208 insertions(+), 31 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java index 4cd769f4d245..f39755ffe8fc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java @@ -23,6 +23,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Queue; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; @@ -31,6 +32,7 @@ import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.Lock; import java.util.concurrent.locks.ReentrantLock; @@ -80,6 +82,18 @@ public class StateContext { private boolean shutdownGracefully = false; private final AtomicLong threadPoolNotAvailableCount; + /** + * term of latest leader SCM, extract from SCMCommand. + * + * Only leader SCM (both latest and stale) can send out SCMCommand, + * which will save its term in SCMCommand. Since latest leader SCM + * always has the highest term, term can be used to detect SCMCommand + * from stale leader SCM. + * + * For non-HA mode, term of SCMCommand will be 0. + */ + private Optional termOfLeaderSCM = Optional.empty(); + /** * Starting with a 2 sec heartbeat frequency which will be updated to the * real HB frequency after scm registration. With this method the @@ -470,6 +484,65 @@ public void execute(ExecutorService service, long time, TimeUnit unit) } } + /** + * After startup, datanode needs detect latest leader SCM before handling + * any SCMCommand, so that it won't be disturbed by stale leader SCM. + * + * The rule is: after majority SCMs are in HEARTBEAT state and has + * heard from leader SCMs (commandQueue is not empty), datanode will init + * termOfLeaderSCM with the max term found in commandQueue. + * + * The init process also works for non-HA mode. In that case, term of all + * SCMCommands will be 0. + */ + private void initTermOfLeaderSCM() { + // only init once + if (termOfLeaderSCM.isPresent()) { + return; + } + + AtomicInteger scmNum = new AtomicInteger(0); + AtomicInteger activeScmNum = new AtomicInteger(0); + + getParent().getConnectionManager().getValues() + .forEach(endpoint -> { + if (endpoint.isPassive()) { + return; + } + scmNum.incrementAndGet(); + if (endpoint.getState() + == EndpointStateMachine.EndPointStates.HEARTBEAT) { + activeScmNum.incrementAndGet(); + } + }); + + // majority SCMs should be in HEARTBEAT state. + if (activeScmNum.get() < scmNum.get() / 2 + 1) { + return; + } + + // if commandQueue is not empty, init termOfLeaderSCM + // with the largest term found in commandQueue + commandQueue.stream() + .mapToLong(SCMCommand::getTerm) + .max() + .ifPresent(term -> termOfLeaderSCM = Optional.of(term)); + } + + /** + * monotonically increase termOfLeaderSCM. + * Always record the latest term that has seen. + */ + private void updateTermOfLeaderSCM(SCMCommand command) { + if (!termOfLeaderSCM.isPresent()) { + LOG.error("should init termOfLeaderSCM before update it."); + return; + } + + termOfLeaderSCM = Optional.of( + Long.max(termOfLeaderSCM.get(), command.getTerm())); + } + /** * Returns the next command or null if it is empty. * @@ -478,7 +551,26 @@ public void execute(ExecutorService service, long time, TimeUnit unit) public SCMCommand getNextCommand() { lock.lock(); try { - return commandQueue.poll(); + initTermOfLeaderSCM(); + if (!termOfLeaderSCM.isPresent()) { + return null; // not ready yet + } + + while (true) { + SCMCommand command = commandQueue.poll(); + if (command == null) { + return null; + } + + updateTermOfLeaderSCM(command); + if (command.getTerm() == termOfLeaderSCM.get()) { + return command; + } + + LOG.warn("Detect and drop a SCMCommand {} from stale leader SCM," + + " stale term {}, latest term {}.", + command, command.getTerm(), termOfLeaderSCM.get()); + } } finally { lock.unlock(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index da2034d93c2d..eac7b37e3383 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -272,6 +272,9 @@ private void processResponse(SCMHeartbeatResponseProto response, DeleteBlocksCommand db = DeleteBlocksCommand .getFromProtobuf( commandResponseProto.getDeleteBlocksCommandProto()); + if (commandResponseProto.hasTerm()) { + db.setTerm(commandResponseProto.getTerm()); + } if (!db.blocksTobeDeleted().isEmpty()) { if (LOG.isDebugEnabled()) { LOG.debug(DeletedContainerBlocksSummary @@ -285,6 +288,9 @@ private void processResponse(SCMHeartbeatResponseProto response, CloseContainerCommand closeContainer = CloseContainerCommand.getFromProtobuf( commandResponseProto.getCloseContainerCommandProto()); + if (commandResponseProto.hasTerm()) { + closeContainer.setTerm(commandResponseProto.getTerm()); + } if (LOG.isDebugEnabled()) { LOG.debug("Received SCM container close request for container {}", closeContainer.getContainerID()); @@ -295,6 +301,9 @@ private void processResponse(SCMHeartbeatResponseProto response, ReplicateContainerCommand replicateContainerCommand = ReplicateContainerCommand.getFromProtobuf( commandResponseProto.getReplicateContainerCommandProto()); + if (commandResponseProto.hasTerm()) { + replicateContainerCommand.setTerm(commandResponseProto.getTerm()); + } if (LOG.isDebugEnabled()) { LOG.debug("Received SCM container replicate request for container {}", replicateContainerCommand.getContainerID()); @@ -305,6 +314,9 @@ private void processResponse(SCMHeartbeatResponseProto response, DeleteContainerCommand deleteContainerCommand = DeleteContainerCommand.getFromProtobuf( commandResponseProto.getDeleteContainerCommandProto()); + if (commandResponseProto.hasTerm()) { + deleteContainerCommand.setTerm(commandResponseProto.getTerm()); + } if (LOG.isDebugEnabled()) { LOG.debug("Received SCM delete container request for container {}", deleteContainerCommand.getContainerID()); @@ -315,6 +327,9 @@ private void processResponse(SCMHeartbeatResponseProto response, CreatePipelineCommand createPipelineCommand = CreatePipelineCommand.getFromProtobuf( commandResponseProto.getCreatePipelineCommandProto()); + if (commandResponseProto.hasTerm()) { + createPipelineCommand.setTerm(commandResponseProto.getTerm()); + } if (LOG.isDebugEnabled()) { LOG.debug("Received SCM create pipeline request {}", createPipelineCommand.getPipelineID()); @@ -325,6 +340,9 @@ private void processResponse(SCMHeartbeatResponseProto response, ClosePipelineCommand closePipelineCommand = ClosePipelineCommand.getFromProtobuf( commandResponseProto.getClosePipelineCommandProto()); + if (commandResponseProto.hasTerm()) { + closePipelineCommand.setTerm(commandResponseProto.getTerm()); + } if (LOG.isDebugEnabled()) { LOG.debug("Received SCM close pipeline request {}", closePipelineCommand.getPipelineID()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java index a44ef384362b..5fd1690c1f72 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java @@ -25,6 +25,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Consumer; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -85,6 +86,7 @@ public class OzoneContainer { private List dataScanners; private final BlockDeletingService blockDeletingService; private final GrpcTlsConfig tlsClientConfig; + private final AtomicBoolean isStarted; /** * Construct OzoneContainer object. @@ -152,6 +154,8 @@ public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource TimeUnit.MILLISECONDS, config); tlsClientConfig = RatisHelper.createTlsClientConfig( secConf, certClient != null ? certClient.getCACertificate() : null); + + isStarted = new AtomicBoolean(false); } public GrpcTlsConfig getTlsClientConfig() { @@ -240,6 +244,10 @@ private void stopContainerScrub() { * @throws IOException */ public void start(String scmId) throws IOException { + if (!isStarted.compareAndSet(false, true)) { + LOG.info("Ignore. OzoneContainer already started."); + return; + } LOG.info("Attempting to start container services."); startContainerScrub(); writeChannel.start(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java index 3c4e05b424af..4d87bb096cb6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/SCMCommand.java @@ -30,7 +30,13 @@ */ public abstract class SCMCommand implements IdentifiableEventPayload { - private long id; + private final long id; + + // Under HA mode, holds term of underlying RaftServer iff current + // SCM is a leader, otherwise, holds term 0. + // Notes that, the first elected leader is from term 1, term 0, + // as the initial value of currentTerm, is never used under HA mode. + private long term = 0; SCMCommand() { this.id = HddsIdFactory.getLongId(); @@ -59,4 +65,18 @@ public long getId() { return id; } + /** + * Get term of this command. + * @return term + */ + public long getTerm() { + return term; + } + + /** + * Set term of this command. + */ + public void setTerm(long term) { + this.term = term; + } } diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto index 4f610ff24b1a..973789a35369 100644 --- a/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto +++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto @@ -303,6 +303,12 @@ message SCMCommandProto { optional ReplicateContainerCommandProto replicateContainerCommandProto = 6; optional CreatePipelineCommandProto createPipelineCommandProto = 7; optional ClosePipelineCommandProto closePipelineCommandProto = 8; + + // Under HA mode, holds term of underlying RaftServer iff current + // SCM is a leader, otherwise, holds term 0. + // Notes that, the first elected leader is from term 1, term 0, + // as the initial value of currentTerm, is never used under HA mode. + optional uint64 term = 15; } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 8ee26a25df03..0fd5e8276045 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -22,6 +22,7 @@ import org.apache.ratis.protocol.exceptions.NotLeaderException; import java.io.IOException; +import java.util.Optional; /** * SCMHAManager provides HA service for SCM. @@ -34,9 +35,13 @@ public interface SCMHAManager { void start() throws IOException; /** - * Returns true if the current SCM is the leader. + * For HA mode, return an Optional that holds term of the + * underlying RaftServer iff current SCM is in leader role. + * Otherwise, return an empty optional. + * + * For non-HA mode, return an Optional that holds term 0. */ - boolean isLeader(); + Optional isLeader(); /** * Returns RatisServer instance associated with the SCM instance. diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 33f408d6b752..5271ac6f8e90 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -21,6 +21,7 @@ import java.util.List; import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.ratis.proto.RaftProtos; import org.apache.ratis.protocol.RaftGroupMemberId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; @@ -32,6 +33,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.util.Optional; /** * SCMHAManagerImpl uses Apache Ratis for HA implementation. We will have 2N+1 @@ -70,29 +72,28 @@ public void start() throws IOException { * {@inheritDoc} */ @Override - public boolean isLeader() { + public Optional isLeader() { if (!SCMHAUtils.isSCMHAEnabled(conf)) { // When SCM HA is not enabled, the current SCM is always the leader. - return true; + return Optional.of((long)0); } RaftServer server = ratisServer.getServer(); Preconditions.checkState(server instanceof RaftServerProxy); - RaftServerImpl serverImpl = null; try { // SCM only has one raft group. - serverImpl = ((RaftServerProxy) server) + RaftServerImpl serverImpl = ((RaftServerProxy) server) .getImpl(ratisServer.getRaftGroupId()); if (serverImpl != null) { - // Only when it's sure the current SCM is the leader, otherwise - // it should all return false. - return serverImpl.isLeader(); + RaftProtos.RoleInfoProto roleInfoProto = serverImpl.getRoleInfoProto(); + return roleInfoProto.hasLeaderInfo() + ? Optional.of(roleInfoProto.getLeaderInfo().getTerm()) + : Optional.empty(); } } catch (IOException ioe) { LOG.error("Fail to get RaftServer impl and therefore it's not clear " + "whether it's leader. ", ioe); } - - return false; + return Optional.empty(); } /** @@ -104,11 +105,6 @@ public SCMRatisServer getRatisServer() { } private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) { - /* - TODO: Fix Me - Ratis API has changed. - RaftServerImpl#getRoleInfoProto is no more public. - if (serverImpl.isLeader()) { return RaftPeerId.getRaftPeerId( serverImpl.getRoleInfoProto().getLeaderInfo().toString()); @@ -119,8 +115,6 @@ private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) { } else { return null; } - */ - return null; } @Override diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java index 328f2712b5fe..89fd99ecd49f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java @@ -25,6 +25,7 @@ import java.util.LinkedList; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.UUID; import java.util.Collections; @@ -47,6 +48,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric; import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat; +import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.net.NetworkTopology; import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -106,13 +108,16 @@ public class SCMNodeManager implements NodeManager { new ConcurrentHashMap<>(); private final int numPipelinesPerMetadataVolume; private final int heavyNodeCriteria; + private final SCMHAManager scmhaManager; /** * Constructs SCM machine Manager. */ public SCMNodeManager(OzoneConfiguration conf, - SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher, - NetworkTopology networkTopology) { + SCMStorageConfig scmStorageConfig, + EventPublisher eventPublisher, + NetworkTopology networkTopology, + SCMHAManager scmhaManager) { this.nodeStateManager = new NodeStateManager(conf, eventPublisher); this.version = VersionInfo.getLatestVersion(); this.commandQueue = new CommandQueue(); @@ -138,6 +143,14 @@ public SCMNodeManager(OzoneConfiguration conf, ScmConfigKeys.OZONE_SCM_PIPELINE_PER_METADATA_VOLUME_DEFAULT); String dnLimit = conf.get(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT); this.heavyNodeCriteria = dnLimit == null ? 0 : Integer.parseInt(dnLimit); + this.scmhaManager = scmhaManager; + } + + public SCMNodeManager(OzoneConfiguration conf, + SCMStorageConfig scmStorageConfig, + EventPublisher eventPublisher, + NetworkTopology networkTopology) { + this(conf, scmStorageConfig, eventPublisher, networkTopology, null); } private void registerMXBean() { @@ -658,6 +671,18 @@ public Set getContainers(DatanodeDetails datanodeDetails) // Refactor and remove all the usage of this method and delete this method. @Override public void addDatanodeCommand(UUID dnId, SCMCommand command) { + if (scmhaManager != null && command.getTerm() == 0) { + Optional termOpt = scmhaManager.isLeader(); + + if (!termOpt.isPresent()) { + LOG.warn("Not leader, drop SCMCommand {}.", command); + return; + } + + LOG.warn("Help set term {} for SCMCommand {}. It is not an accurate " + + "way to set term of SCMCommand.", termOpt.get(), command); + command.setTerm(termOpt.get()); + } this.commandQueue.addCommand(dnId, command); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 041c94179112..48fbdbff440f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -49,6 +49,7 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; +import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -637,13 +638,15 @@ public void setScmhaManager(SCMHAManager scmhaManager) { } /** - * Check if scm is current leader. - * @throws NotLeaderException when it's not the current leader. + * return term of underlying RaftServer if role of SCM is leader. + * @throws NotLeaderException when it's not leader. */ - private void checkLeader() throws NotLeaderException { - if (!scmhaManager.isLeader()) { + private long checkLeader() throws NotLeaderException { + Optional termOpt = scmhaManager.isLeader(); + if (!termOpt.isPresent()) { throw scmhaManager.triggerNotLeaderException(); } + return termOpt.get(); } private void setBackgroundPipelineCreator( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java index a2953415cb38..b71f906dfa0f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java @@ -292,6 +292,11 @@ public SCMCommandProto getCommandResponse(SCMCommand cmd) throws IOException { SCMCommandProto.Builder builder = SCMCommandProto.newBuilder(); + + // In HA mode, it is the term of current leader SCM. + // In non-HA mode, it is the default value 0. + builder.setTerm(cmd.getTerm()); + switch (cmd.getType()) { case reregisterCommand: return builder diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 5843d5afd847..501472d3bbe0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -425,7 +425,7 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmNodeManager = configurator.getScmNodeManager(); } else { scmNodeManager = new SCMNodeManager( - conf, scmStorageConfig, eventQueue, clusterMap); + conf, scmStorageConfig, eventQueue, clusterMap, scmHAManager); } placementMetrics = SCMContainerPlacementMetrics.create(); @@ -1027,7 +1027,7 @@ public ReplicationManager getReplicationManager() { * @return - if the current scm is the leader. */ public boolean checkLeader() { - return scmHAManager.isLeader(); + return scmHAManager.isLeader().isPresent(); } /** diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index ac58438d9477..ab329a567039 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -24,6 +24,7 @@ import java.util.EnumMap; import java.util.List; import java.util.Map; +import java.util.Optional; import com.google.protobuf.InvalidProtocolBufferException; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; @@ -79,8 +80,8 @@ public void start() throws IOException { * {@inheritDoc} */ @Override - public boolean isLeader() { - return isLeader; + public Optional isLeader() { + return isLeader ? Optional.of((long)0) : Optional.empty(); } public void setIsLeader(boolean isLeader) { diff --git a/pom.xml b/pom.xml index 05c34d55bd41..d7f9a060ce5d 100644 --- a/pom.xml +++ b/pom.xml @@ -79,7 +79,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs ${ozone.version} - 1.1.0-913f5a4-SNAPSHOT + 1.1.0-4573fb7-SNAPSHOT 0.6.0-SNAPSHOT From 34c393c000adda9f8bc232c03ec32d2f64b318de Mon Sep 17 00:00:00 2001 From: GlenGeng Date: Tue, 8 Dec 2020 20:28:28 +0800 Subject: [PATCH 50/51] HDDS-4551: Remove checkLeader in PipelineManager. (#1658) --- .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 2 +- .../scm/pipeline/PipelineManagerV2Impl.java | 33 +--------- .../org/apache/hadoop/hdds/scm/TestUtils.java | 4 +- .../hdds/scm/block/TestBlockManager.java | 2 +- .../TestCloseContainerEventHandler.java | 2 +- .../container/TestContainerManagerImpl.java | 2 +- .../container/TestSCMContainerManager.java | 2 +- .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 63 ++++++++++--------- .../hdds/scm/node/TestContainerPlacement.java | 2 +- .../scm/pipeline/TestPipelineManagerImpl.java | 61 +++++++++++++----- .../TestHealthyPipelineSafeModeRule.java | 6 +- .../TestOneReplicaPipelineSafeModeRule.java | 2 +- .../scm/safemode/TestSCMSafeModeManager.java | 12 ++-- .../server/TestSCMBlockProtocolServer.java | 2 +- .../hadoop/ozone/om/TestKeyManagerImpl.java | 2 +- 15 files changed, 99 insertions(+), 98 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 5271ac6f8e90..966db437e7c3 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -48,7 +48,7 @@ public class SCMHAManagerImpl implements SCMHAManager { private static final Logger LOG = LoggerFactory.getLogger(SCMHAManagerImpl.class); - private final SCMRatisServerImpl ratisServer; + private final SCMRatisServer ratisServer; private final ConfigurationSource conf; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 48fbdbff440f..3f2f6e2beba6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -49,7 +49,6 @@ import java.util.List; import java.util.Map; import java.util.NavigableSet; -import java.util.Optional; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -144,7 +143,6 @@ public static PipelineManagerV2Impl newPipelineManager( @Override public Pipeline createPipeline(ReplicationType type, ReplicationFactor factor) throws IOException { - checkLeader(); if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) { LOG.debug("Pipeline creation is not allowed until safe mode prechecks " + "complete"); @@ -275,7 +273,6 @@ public List getPipelines( @Override public void addContainerToPipeline( PipelineID pipelineID, ContainerID containerID) throws IOException { - checkLeader(); lock.writeLock().lock(); try { stateManager.addContainerToPipeline(pipelineID, containerID); @@ -287,7 +284,6 @@ public void addContainerToPipeline( @Override public void removeContainerFromPipeline( PipelineID pipelineID, ContainerID containerID) throws IOException { - checkLeader(); lock.writeLock().lock(); try { stateManager.removeContainerFromPipeline(pipelineID, containerID); @@ -299,7 +295,6 @@ public void removeContainerFromPipeline( @Override public NavigableSet getContainersInPipeline( PipelineID pipelineID) throws IOException { - checkLeader(); lock.readLock().lock(); try { return stateManager.getContainers(pipelineID); @@ -310,13 +305,11 @@ public NavigableSet getContainersInPipeline( @Override public int getNumberOfContainers(PipelineID pipelineID) throws IOException { - checkLeader(); return stateManager.getNumberOfContainers(pipelineID); } @Override public void openPipeline(PipelineID pipelineId) throws IOException { - checkLeader(); lock.writeLock().lock(); try { Pipeline pipeline = stateManager.getPipeline(pipelineId); @@ -342,7 +335,6 @@ public void openPipeline(PipelineID pipelineId) throws IOException { * @throws IOException */ protected void removePipeline(Pipeline pipeline) throws IOException { - checkLeader(); pipelineFactory.close(pipeline.getType(), pipeline); PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); @@ -364,7 +356,6 @@ protected void removePipeline(Pipeline pipeline) throws IOException { */ protected void closeContainersForPipeline(final PipelineID pipelineId) throws IOException { - checkLeader(); Set containerIDs = stateManager.getContainers(pipelineId); for (ContainerID containerID : containerIDs) { eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID); @@ -380,7 +371,6 @@ protected void closeContainersForPipeline(final PipelineID pipelineId) @Override public void closePipeline(Pipeline pipeline, boolean onTimeout) throws IOException { - checkLeader(); PipelineID pipelineID = pipeline.getId(); lock.writeLock().lock(); try { @@ -410,8 +400,6 @@ public void closePipeline(Pipeline pipeline, boolean onTimeout) @Override public void scrubPipeline(ReplicationType type, ReplicationFactor factor) throws IOException { - checkLeader(); - Instant currentTime = Instant.now(); Long pipelineScrubTimeoutInMills = conf.getTimeDuration( ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, @@ -484,7 +472,6 @@ public int minPipelineLimit(Pipeline pipeline) { @Override public void activatePipeline(PipelineID pipelineID) throws IOException { - checkLeader(); stateManager.updatePipelineState(pipelineID.getProtobuf(), HddsProtos.PipelineState.PIPELINE_OPEN); } @@ -498,7 +485,6 @@ public void activatePipeline(PipelineID pipelineID) @Override public void deactivatePipeline(PipelineID pipelineID) throws IOException { - checkLeader(); stateManager.updatePipelineState(pipelineID.getProtobuf(), HddsProtos.PipelineState.PIPELINE_DORMANT); } @@ -513,7 +499,6 @@ public void deactivatePipeline(PipelineID pipelineID) @Override public void waitPipelineReady(PipelineID pipelineID, long timeout) throws IOException { - checkLeader(); long st = Time.monotonicNow(); if (timeout == 0) { timeout = pipelineWaitDefaultTimeout; @@ -546,7 +531,6 @@ public void waitPipelineReady(PipelineID pipelineID, long timeout) @Override public Map getPipelineInfo() throws NotLeaderException { - checkLeader(); final Map pipelineInfo = new HashMap<>(); for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) { pipelineInfo.put(state.toString(), 0); @@ -632,21 +616,10 @@ public void setPipelineProvider(ReplicationType replicationType, public StateManager getStateManager() { return stateManager; } - - public void setScmhaManager(SCMHAManager scmhaManager) { - this.scmhaManager = scmhaManager; - } - /** - * return term of underlying RaftServer if role of SCM is leader. - * @throws NotLeaderException when it's not leader. - */ - private long checkLeader() throws NotLeaderException { - Optional termOpt = scmhaManager.isLeader(); - if (!termOpt.isPresent()) { - throw scmhaManager.triggerNotLeaderException(); - } - return termOpt.get(); + @VisibleForTesting + public SCMHAManager getScmhaManager() { + return scmhaManager; } private void setBackgroundPipelineCreator( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java index e5895268a214..4852fa53b419 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java @@ -475,7 +475,7 @@ public static void quasiCloseContainer(ContainerManager containerManager, public static StorageContainerManager getScmSimple(OzoneConfiguration conf) throws IOException, AuthenticationException { SCMConfigurator configurator = new SCMConfigurator(); - configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); return StorageContainerManager.createSCM(conf, configurator); } @@ -492,7 +492,7 @@ public static StorageContainerManager getScmSimple(OzoneConfiguration conf) public static StorageContainerManager getScm(OzoneConfiguration conf) throws IOException, AuthenticationException { SCMConfigurator configurator = new SCMConfigurator(); - configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); return getScm(conf, configurator); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 9eea79faefab..ebe29fb0617a 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -112,7 +112,7 @@ public void setUp() throws Exception { // Override the default Node Manager and SCMHAManager // in SCM with the Mock one. nodeManager = new MockNodeManager(true, 10); - scmHAManager = MockSCMHAManager.getInstance(); + scmHAManager = MockSCMHAManager.getInstance(true); eventQueue = new EventQueue(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java index fbe4d42b0b85..ff6ea6d691de 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java @@ -80,7 +80,7 @@ public static void setUp() throws Exception { pipelineManager = PipelineManagerV2Impl.newPipelineManager( configuration, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java index 6492e0ac614f..322b0c379ab6 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java @@ -60,7 +60,7 @@ public void setUp() throws Exception { pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE); containerManager = new ContainerManagerImpl(conf, - MockSCMHAManager.getInstance(), pipelineManager, + MockSCMHAManager.getInstance(true), pipelineManager, SCMDBDefinition.CONTAINERS.getTable(dbStore)); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java index a45d63718c19..b45f9c10c2bf 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java @@ -95,7 +95,7 @@ public static void setUp() throws Exception { SCMMetadataStore scmMetadataStore = new SCMMetadataStoreImpl(conf); pipelineManager = PipelineManagerV2Impl.newPipelineManager( conf, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), new EventQueue()); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index ab329a567039..a5fd74878259 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -47,28 +47,16 @@ public final class MockSCMHAManager implements SCMHAManager { private final SCMRatisServer ratisServer; private boolean isLeader; - public static SCMHAManager getInstance() { - return new MockSCMHAManager(); - } - - public static SCMHAManager getLeaderInstance() { - MockSCMHAManager mockSCMHAManager = new MockSCMHAManager(); - mockSCMHAManager.setIsLeader(true); - return mockSCMHAManager; - } - - public static SCMHAManager getFollowerInstance() { - MockSCMHAManager mockSCMHAManager = new MockSCMHAManager(); - mockSCMHAManager.setIsLeader(false); - return mockSCMHAManager; + public static SCMHAManager getInstance(boolean isLeader) { + return new MockSCMHAManager(isLeader); } /** * Creates MockSCMHAManager instance. */ - private MockSCMHAManager() { + private MockSCMHAManager(boolean isLeader) { this.ratisServer = new MockRatisServer(); - this.isLeader = true; + this.isLeader = isLeader; } @Override @@ -127,7 +115,7 @@ public NotLeaderException triggerNotLeaderException() { null, new ArrayList<>()); } - private static class MockRatisServer implements SCMRatisServer { + private class MockRatisServer implements SCMRatisServer { private Map handlers = new EnumMap<>(RequestType.class); @@ -148,19 +136,32 @@ public SCMRatisResponse submitRequest(final SCMRatisRequest request) final RaftGroupMemberId raftId = RaftGroupMemberId.valueOf( RaftPeerId.valueOf("peer"), RaftGroupId.randomId()); RaftClientReply reply; - try { - final Message result = process(request); - reply = RaftClientReply.newBuilder() - .setClientId(ClientId.randomId()) - .setServerId(raftId) - .setGroupId(RaftGroupId.emptyGroupId()) - .setCallId(1L) - .setSuccess(true) - .setMessage(result) - .setException(null) - .setLogIndex(1L) - .build(); - } catch (Exception ex) { + if (isLeader().isPresent()) { + try { + final Message result = process(request); + reply = RaftClientReply.newBuilder() + .setClientId(ClientId.randomId()) + .setServerId(raftId) + .setGroupId(RaftGroupId.emptyGroupId()) + .setCallId(1L) + .setSuccess(true) + .setMessage(result) + .setException(null) + .setLogIndex(1L) + .build(); + } catch (Exception ex) { + reply = RaftClientReply.newBuilder() + .setClientId(ClientId.randomId()) + .setServerId(raftId) + .setGroupId(RaftGroupId.emptyGroupId()) + .setCallId(1L) + .setSuccess(false) + .setMessage(Message.EMPTY) + .setException(new StateMachineException(raftId, ex)) + .setLogIndex(1L) + .build(); + } + } else { reply = RaftClientReply.newBuilder() .setClientId(ClientId.randomId()) .setServerId(raftId) @@ -168,7 +169,7 @@ public SCMRatisResponse submitRequest(final SCMRatisRequest request) .setCallId(1L) .setSuccess(false) .setMessage(Message.EMPTY) - .setException(new StateMachineException(raftId, ex)) + .setException(triggerNotLeaderException()) .setLogIndex(1L) .build(); } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java index 7fea0c52d736..26de4fb3b3ab 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java @@ -118,7 +118,7 @@ SCMContainerManager createContainerManager(ConfigurationSource config, PipelineManager pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), scmNodeManager, scmMetadataStore.getPipelineTable(), eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 51fff062af9b..1bff1e70d326 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.container.MockNodeManager; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager; -import org.apache.hadoop.hdds.scm.ha.SCMHAManager; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher; @@ -91,15 +90,10 @@ public void cleanup() throws Exception { FileUtil.fullyDelete(testDir); } - private PipelineManagerV2Impl createPipelineManager(boolean leader) + private PipelineManagerV2Impl createPipelineManager(boolean isLeader) throws IOException { - SCMHAManager scmhaManager; - if (leader) { - scmhaManager = MockSCMHAManager.getLeaderInstance(); - } else { - scmhaManager = MockSCMHAManager.getFollowerInstance(); - } - return PipelineManagerV2Impl.newPipelineManager(conf, scmhaManager, + return PipelineManagerV2Impl.newPipelineManager(conf, + MockSCMHAManager.getInstance(isLeader), new MockNodeManager(true, 20), SCMDBDefinition.PIPELINES.getTable(dbStore), new EventQueue()); @@ -195,7 +189,8 @@ public void testOpenPipelineShouldFailOnFollower() throws Exception { Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); // Change to follower - pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager; + ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false); try { pipelineManager.openPipeline(pipeline.getId()); } catch (NotLeaderException ex) { @@ -216,7 +211,8 @@ public void testActivatePipelineShouldFailOnFollower() throws Exception { Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); // Change to follower - pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager; + ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false); try { pipelineManager.activatePipeline(pipeline.getId()); } catch (NotLeaderException ex) { @@ -237,7 +233,8 @@ public void testDeactivatePipelineShouldFailOnFollower() throws Exception { Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); // Change to follower - pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager; + ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false); try { pipelineManager.deactivatePipeline(pipeline.getId()); } catch (NotLeaderException ex) { @@ -301,7 +298,8 @@ public void testClosePipelineShouldFailOnFollower() throws Exception { Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId())); Assert.assertEquals(ALLOCATED, pipeline.getPipelineState()); // Change to follower - pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance()); + assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager; + ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false); try { pipelineManager.closePipeline(pipeline, false); } catch (NotLeaderException ex) { @@ -494,12 +492,41 @@ public void testScrubPipeline() throws Exception { pipelineManager.close(); } - @Test (expected = NotLeaderException.class) + @Test public void testScrubPipelineShouldFailOnFollower() throws Exception { - PipelineManagerV2Impl pipelineManager = createPipelineManager(false); + // No timeout for pipeline scrubber. + conf.setTimeDuration( + OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1, + TimeUnit.MILLISECONDS); + + PipelineManagerV2Impl pipelineManager = createPipelineManager(true); pipelineManager.allowPipelineCreation(); - pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS, - HddsProtos.ReplicationFactor.THREE); + Pipeline pipeline = pipelineManager + .createPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + // At this point, pipeline is not at OPEN stage. + Assert.assertEquals(Pipeline.PipelineState.ALLOCATED, + pipeline.getPipelineState()); + + // pipeline should be seen in pipelineManager as ALLOCATED. + Assert.assertTrue(pipelineManager + .getPipelines(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE, + Pipeline.PipelineState.ALLOCATED).contains(pipeline)); + + // Change to follower + assert pipelineManager.getScmhaManager() instanceof MockSCMHAManager; + ((MockSCMHAManager) pipelineManager.getScmhaManager()).setIsLeader(false); + + try { + pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS, + HddsProtos.ReplicationFactor.THREE); + } catch (NotLeaderException ex) { + pipelineManager.close(); + return; + } + // Should not reach here. + Assert.fail(); } @Test diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 8426084d4434..ee1f06cbe446 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -74,7 +74,7 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); @@ -123,7 +123,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); @@ -217,7 +217,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index 6860da2d695c..5e41289fe60f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -84,7 +84,7 @@ private void setup(int nodes, int pipelineFactorThreeCount, pipelineManager = PipelineManagerV2Impl.newPipelineManager( ozoneConfiguration, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), mockNodeManager, scmMetadataStore.getPipelineTable(), eventQueue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index f081bacc3f03..7bbae4f9fd18 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -303,7 +303,7 @@ public void testFailWithIncorrectValueForHealthyPipelinePercent() PipelineManager pipelineManager = PipelineManagerV2Impl.newPipelineManager( conf, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), mockNodeManager, scmMetadataStore.getPipelineTable(), queue); @@ -326,7 +326,7 @@ public void testFailWithIncorrectValueForOneReplicaPipelinePercent() PipelineManager pipelineManager = PipelineManagerV2Impl.newPipelineManager( conf, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), mockNodeManager, scmMetadataStore.getPipelineTable(), queue); @@ -348,7 +348,7 @@ public void testFailWithIncorrectValueForSafeModePercent() throws Exception { PipelineManager pipelineManager = PipelineManagerV2Impl.newPipelineManager( conf, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), mockNodeManager, scmMetadataStore.getPipelineTable(), queue); @@ -377,7 +377,7 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( conf, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), mockNodeManager, scmMetadataStore.getPipelineTable(), queue); @@ -627,7 +627,7 @@ public void testSafeModePipelineExitRule() throws Exception { PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), queue); @@ -690,7 +690,7 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() PipelineManagerV2Impl pipelineManager = PipelineManagerV2Impl.newPipelineManager( config, - MockSCMHAManager.getInstance(), + MockSCMHAManager.getInstance(true), nodeManager, scmMetadataStore.getPipelineTable(), queue); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java index f4553abd736c..a87dde9b0019 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java @@ -59,7 +59,7 @@ public void setUp() throws Exception { File dir = GenericTestUtils.getRandomizedTestDir(); config.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString()); SCMConfigurator configurator = new SCMConfigurator(); - configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); scm = TestUtils.getScm(config, configurator); scm.start(); scm.exitSafeMode(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index 0330b43b12e3..8c8ca7264286 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -171,7 +171,7 @@ public static void setUp() throws Exception { SCMConfigurator configurator = new SCMConfigurator(); configurator.setScmNodeManager(nodeManager); configurator.setNetworkTopology(clusterMap); - configurator.setSCMHAManager(MockSCMHAManager.getInstance()); + configurator.setSCMHAManager(MockSCMHAManager.getInstance(true)); scm = TestUtils.getScm(conf, configurator); scm.start(); scm.exitSafeMode(); From 8a84b03a6771bd8c9b4648ca4dbca8820f3a1788 Mon Sep 17 00:00:00 2001 From: Glen Geng Date: Thu, 10 Dec 2020 20:54:39 +0800 Subject: [PATCH 51/51] HDDS-4575: Refactor SCMHAManager and SCMRatisServer with RaftServer.Division --- .../hadoop/hdds/scm/ha/SCMHAManager.java | 19 ------ .../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 68 +------------------ .../hadoop/hdds/scm/ha/SCMRatisServer.java | 15 ++-- .../hdds/scm/ha/SCMRatisServerImpl.java | 68 +++++++++++-------- .../scm/pipeline/PipelineManagerV2Impl.java | 4 +- .../scm/server/SCMClientProtocolServer.java | 3 +- .../scm/server/StorageContainerManager.java | 11 --- .../hadoop/hdds/scm/ha/MockSCMHAManager.java | 42 +++--------- .../src/main/smoketest/admincli/scmha.robot | 2 +- 9 files changed, 66 insertions(+), 166 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java index 0fd5e8276045..59410b19c2df 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java @@ -17,10 +17,6 @@ package org.apache.hadoop.hdds.scm.ha; -import java.util.List; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.exceptions.NotLeaderException; - import java.io.IOException; import java.util.Optional; @@ -48,23 +44,8 @@ public interface SCMHAManager { */ SCMRatisServer getRatisServer(); - /** - * Returns suggested leader from RaftServer. - */ - RaftPeer getSuggestedLeader(); - /** * Stops the HA service. */ void shutdown() throws IOException; - - /** - * Returns roles of ratis peers. - */ - List getRatisRoles(); - - /** - * Returns NotLeaderException with useful info. - */ - NotLeaderException triggerNotLeaderException(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java index 966db437e7c3..ae91fc2e8f72 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java @@ -18,14 +18,8 @@ package org.apache.hadoop.hdds.scm.ha; import com.google.common.base.Preconditions; -import java.util.List; -import java.util.stream.Collectors; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.ratis.proto.RaftProtos; -import org.apache.ratis.protocol.RaftGroupMemberId; -import org.apache.ratis.protocol.RaftPeer; -import org.apache.ratis.protocol.RaftPeerId; -import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import org.apache.ratis.server.impl.RaftServerImpl; import org.apache.ratis.server.impl.RaftServerProxy; @@ -77,13 +71,14 @@ public Optional isLeader() { // When SCM HA is not enabled, the current SCM is always the leader. return Optional.of((long)0); } - RaftServer server = ratisServer.getServer(); + RaftServer server = ratisServer.getDivision().getRaftServer(); Preconditions.checkState(server instanceof RaftServerProxy); try { // SCM only has one raft group. RaftServerImpl serverImpl = ((RaftServerProxy) server) - .getImpl(ratisServer.getRaftGroupId()); + .getImpl(ratisServer.getDivision().getGroup().getGroupId()); if (serverImpl != null) { + // TODO: getRoleInfoProto() will be exposed from Division later. RaftProtos.RoleInfoProto roleInfoProto = serverImpl.getRoleInfoProto(); return roleInfoProto.hasLeaderInfo() ? Optional.of(roleInfoProto.getLeaderInfo().getTerm()) @@ -104,42 +99,6 @@ public SCMRatisServer getRatisServer() { return ratisServer; } - private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) { - if (serverImpl.isLeader()) { - return RaftPeerId.getRaftPeerId( - serverImpl.getRoleInfoProto().getLeaderInfo().toString()); - } else if (serverImpl.isFollower()) { - return RaftPeerId.valueOf( - serverImpl.getRoleInfoProto().getFollowerInfo() - .getLeaderInfo().getId().getId()); - } else { - return null; - } - } - - @Override - public RaftPeer getSuggestedLeader() { - RaftServer server = ratisServer.getServer(); - Preconditions.checkState(server instanceof RaftServerProxy); - RaftServerImpl serverImpl = null; - try { - // SCM only has one raft group. - serverImpl = ((RaftServerProxy) server) - .getImpl(ratisServer.getRaftGroupId()); - if (serverImpl != null) { - RaftPeerId peerId = getPeerIdFromRoleInfo(serverImpl); - if (peerId != null) { - return RaftPeer.newBuilder().setId(peerId).build(); - } - return null; - } - } catch (IOException ioe) { - LOG.error("Fail to get RaftServer impl and therefore it's not clear " + - "whether it's leader. ", ioe); - } - return null; - } - /** * {@inheritDoc} */ @@ -147,25 +106,4 @@ public RaftPeer getSuggestedLeader() { public void shutdown() throws IOException { ratisServer.stop(); } - - @Override - public List getRatisRoles() { - return getRatisServer() - .getRaftPeers() - .stream() - .map(peer -> peer.getAddress() == null ? "" : peer.getAddress()) - .collect(Collectors.toList()); - } - - /** - * {@inheritDoc} - */ - @Override - public NotLeaderException triggerNotLeaderException() { - return new NotLeaderException(RaftGroupMemberId.valueOf( - ratisServer.getServer().getId(), - ratisServer.getRaftGroupId()), - getSuggestedLeader(), - ratisServer.getRaftPeers()); - } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java index 2f997767cfa0..d8a78be4471b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java @@ -18,8 +18,7 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType; -import org.apache.ratis.protocol.RaftGroupId; -import org.apache.ratis.protocol.RaftPeer; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import java.io.IOException; @@ -40,9 +39,15 @@ SCMRatisResponse submitRequest(SCMRatisRequest request) void stop() throws IOException; - RaftServer getServer(); + RaftServer.Division getDivision(); - RaftGroupId getRaftGroupId(); + /** + * Returns roles of ratis peers. + */ + List getRatisRoles(); - List getRaftPeers(); + /** + * Returns NotLeaderException with useful info. + */ + NotLeaderException triggerNotLeaderException(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java index ab766c9f8701..3a81d2bb9033 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java @@ -23,7 +23,6 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; -import java.util.Collections; import java.util.List; import java.util.UUID; import java.util.concurrent.ExecutionException; @@ -42,6 +41,7 @@ import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; +import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,12 +53,8 @@ public class SCMRatisServerImpl implements SCMRatisServer { private static final Logger LOG = LoggerFactory.getLogger(SCMRatisServerImpl.class); + private final RaftServer.Division division; private final InetSocketAddress address; - private final RaftServer server; - private final RaftGroupId raftGroupId; - private final RaftGroup raftGroup; - private final RaftPeerId raftPeerId; - private final SCMStateMachine scmStateMachine; private final ClientId clientId = ClientId.randomId(); private final AtomicLong callId = new AtomicLong(); @@ -69,41 +65,49 @@ public class SCMRatisServerImpl implements SCMRatisServer { throws IOException { this.address = haConf.getRatisBindAddress(); - SCMHAGroupBuilder scmHAGroupBuilder = new SCMHAGroupBuilder(haConf, conf); - this.raftPeerId = scmHAGroupBuilder.getPeerId(); - this.raftGroupId = scmHAGroupBuilder.getRaftGroupId(); - this.raftGroup = scmHAGroupBuilder.getRaftGroup(); + SCMHAGroupBuilder haGrpBuilder = new SCMHAGroupBuilder(haConf, conf); final RaftProperties serverProperties = RatisUtil .newRaftProperties(haConf, conf); - this.scmStateMachine = new SCMStateMachine(); - this.server = RaftServer.newBuilder() - .setServerId(raftPeerId) - .setGroup(raftGroup) + + RaftServer server = RaftServer.newBuilder() + .setServerId(haGrpBuilder.getPeerId()) + .setGroup(haGrpBuilder.getRaftGroup()) .setProperties(serverProperties) - .setStateMachine(scmStateMachine) + .setStateMachine(new SCMStateMachine()) .build(); + + this.division = server.getDivision(haGrpBuilder.getRaftGroupId()); } @Override public void start() throws IOException { - server.start(); + division.getRaftServer().start(); } @Override public void registerStateMachineHandler(final RequestType handlerType, final Object handler) { - scmStateMachine.registerHandler(handlerType, handler); + ((SCMStateMachine) division.getStateMachine()) + .registerHandler(handlerType, handler); } @Override public SCMRatisResponse submitRequest(SCMRatisRequest request) throws IOException, ExecutionException, InterruptedException { - final RaftClientRequest raftClientRequest = new RaftClientRequest( - clientId, server.getId(), raftGroupId, nextCallId(), request.encode(), - RaftClientRequest.writeRequestType(), null); + final RaftClientRequest raftClientRequest = + new RaftClientRequest( + clientId, + division.getId(), + division.getGroup().getGroupId(), + nextCallId(), + request.encode(), + RaftClientRequest.writeRequestType(), + null); final RaftClientReply raftClientReply = - server.submitClientRequestAsync(raftClientRequest).get(); + division.getRaftServer() + .submitClientRequestAsync(raftClientRequest) + .get(); return SCMRatisResponse.decode(raftClientReply); } @@ -113,26 +117,30 @@ private long nextCallId() { @Override public void stop() throws IOException { - server.close(); + division.getRaftServer().close(); } @Override - public RaftServer getServer() { - return server; + public RaftServer.Division getDivision() { + return division; } @Override - public RaftGroupId getRaftGroupId() { - return raftGroupId; + public List getRatisRoles() { + return division.getGroup().getPeers().stream() + .map(peer -> peer.getAddress() == null ? "" : peer.getAddress()) + .collect(Collectors.toList()); } + /** + * {@inheritDoc} + */ @Override - public List getRaftPeers() { - return Collections.singletonList(RaftPeer.newBuilder() - .setId(raftPeerId).build()); + public NotLeaderException triggerNotLeaderException() { + return new NotLeaderException( + division.getMemberId(), null, division.getGroup().getPeers()); } - /** * If the SCM group starts from {@link ScmConfigKeys#OZONE_SCM_NAMES}, * its raft peers should locate on different nodes, and use the same port diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java index 3f2f6e2beba6..8b7d849842d0 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java @@ -589,9 +589,7 @@ public void onMessage(SCMSafeModeManager.SafeModeStatus status, startPipelineCreator(); } } catch (NotLeaderException ex) { - LOG.warn("Not the current leader SCM and cannot process pipeline" + - " creation. Suggested leader is: ", - scmhaManager.getSuggestedLeader().getAddress()); + LOG.warn("Not leader SCM, cannot process pipeline creation."); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java index 3ad31d7913b3..c7cf342a6d06 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java @@ -489,7 +489,8 @@ public ScmInfo getScmInfo() throws IOException { new ScmInfo.Builder() .setClusterId(scm.getScmStorageConfig().getClusterID()) .setScmId(scm.getScmStorageConfig().getScmId()) - .setRatisPeerRoles(scm.getScmHAManager().getRatisRoles()); + .setRatisPeerRoles( + scm.getScmHAManager().getRatisServer().getRatisRoles()); return builder.build(); } catch (Exception ex) { auditSuccess = false; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 501472d3bbe0..74ae7804e11e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -1030,17 +1030,6 @@ public boolean checkLeader() { return scmHAManager.isLeader().isPresent(); } - /** - * Get suggested leader from Raft. - * @return - suggested leader address. - */ - public String getSuggestedLeader() { - if (scmHAManager.getSuggestedLeader() == null) { - return null; - } - return scmHAManager.getSuggestedLeader().getAddress(); - } - public void checkAdminAccess(String remoteUser) throws IOException { if (remoteUser != null && !scmAdminUsernames.contains(remoteUser)) { throw new IOException( diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java index a5fd74878259..a624e491a644 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java @@ -33,7 +33,6 @@ import org.apache.ratis.protocol.RaftClientReply; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.protocol.RaftGroupMemberId; -import org.apache.ratis.protocol.RaftPeer; import org.apache.ratis.protocol.RaftPeerId; import org.apache.ratis.protocol.exceptions.NotLeaderException; import org.apache.ratis.server.RaftServer; @@ -76,11 +75,6 @@ public void setIsLeader(boolean isLeader) { this.isLeader = isLeader; } - @Override - public RaftPeer getSuggestedLeader() { - throw new UnsupportedOperationException(); - } - /** * {@inheritDoc} */ @@ -97,24 +91,6 @@ public void shutdown() throws IOException { ratisServer.stop(); } - @Override - public List getRatisRoles() { - return Arrays.asList( - "180.3.14.5:9865", - "180.3.14.21:9865", - "180.3.14.145:9865"); - } - - /** - * {@inheritDoc} - */ - @Override - public NotLeaderException triggerNotLeaderException() { - return new NotLeaderException(RaftGroupMemberId.valueOf( - RaftPeerId.valueOf("peer"), RaftGroupId.randomId()), - null, new ArrayList<>()); - } - private class MockRatisServer implements SCMRatisServer { private Map handlers = @@ -204,23 +180,27 @@ private Message process(final SCMRatisRequest request) } @Override - public RaftServer getServer() { - return null; + public void stop() { } @Override - public RaftGroupId getRaftGroupId() { + public RaftServer.Division getDivision() { return null; } @Override - public List getRaftPeers() { - return new ArrayList<>(); + public List getRatisRoles() { + return Arrays.asList( + "180.3.14.5:9865", + "180.3.14.21:9865", + "180.3.14.145:9865"); } @Override - public void stop() { + public NotLeaderException triggerNotLeaderException() { + return new NotLeaderException(RaftGroupMemberId.valueOf( + RaftPeerId.valueOf("peer"), RaftGroupId.randomId()), + null, new ArrayList<>()); } } - } \ No newline at end of file diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot index 31a990f857d7..4d7c23237bfc 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot @@ -25,4 +25,4 @@ Test Timeout 5 minutes *** Test Cases *** Run scm roles ${output} = Execute ozone admin scm roles - Should contain ${output} [] + Should contain ${output} [scm:9865]