Log leader and handshake failures by default #42342
Changes to `LeaderChecker.java`:

```diff
@@ -22,6 +22,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.common.Nullable;
@@ -33,6 +34,7 @@
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.threadpool.ThreadPool.Names;
 import org.elasticsearch.transport.ConnectTransportException;
+import org.elasticsearch.transport.NodeNotConnectedException;
 import org.elasticsearch.transport.TransportConnectionListener;
 import org.elasticsearch.transport.TransportException;
 import org.elasticsearch.transport.TransportRequest;
@@ -48,6 +50,7 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
+import java.util.function.Consumer;

 /**
  * The LeaderChecker is responsible for allowing followers to check that the currently elected leader is still connected and healthy. We are
@@ -75,20 +78,17 @@ public class LeaderChecker {
     public static final Setting<Integer> LEADER_CHECK_RETRY_COUNT_SETTING =
         Setting.intSetting("cluster.fault_detection.leader_check.retry_count", 3, 1, Setting.Property.NodeScope);

-    private final Settings settings;
-
     private final TimeValue leaderCheckInterval;
     private final TimeValue leaderCheckTimeout;
     private final int leaderCheckRetryCount;
     private final TransportService transportService;
-    private final Runnable onLeaderFailure;
+    private final Consumer<Exception> onLeaderFailure;

     private AtomicReference<CheckScheduler> currentChecker = new AtomicReference<>();

     private volatile DiscoveryNodes discoveryNodes;

-    public LeaderChecker(final Settings settings, final TransportService transportService, final Runnable onLeaderFailure) {
-        this.settings = settings;
+    public LeaderChecker(final Settings settings, final TransportService transportService, final Consumer<Exception> onLeaderFailure) {
         leaderCheckInterval = LEADER_CHECK_INTERVAL_SETTING.get(settings);
         leaderCheckTimeout = LEADER_CHECK_TIMEOUT_SETTING.get(settings);
         leaderCheckRetryCount = LEADER_CHECK_RETRY_COUNT_SETTING.get(settings);
@@ -234,16 +234,19 @@ public void handleException(TransportException exp) {
                 }

                 if (exp instanceof ConnectTransportException || exp.getCause() instanceof ConnectTransportException) {
-                    logger.debug(new ParameterizedMessage("leader [{}] disconnected, failing immediately", leader), exp);
-                    leaderFailed();
+                    logger.debug(new ParameterizedMessage(
+                        "leader [{}] disconnected during check, restarting discovery", leader), exp);
+                    leaderFailed(new ConnectTransportException(leader, "disconnected during check", exp));
                     return;
                 }

                 long failureCount = failureCountSinceLastSuccess.incrementAndGet();
                 if (failureCount >= leaderCheckRetryCount) {
-                    logger.debug(new ParameterizedMessage("{} consecutive failures (limit [{}] is {}) so leader [{}] has failed",
-                        failureCount, LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), leaderCheckRetryCount, leader), exp);
-                    leaderFailed();
+                    logger.debug(new ParameterizedMessage(
+                        "leader [{}] has failed {} consecutive checks (limit [{}] is {}), restarting discovery; last failure was:",
+                        leader, failureCount, LEADER_CHECK_RETRY_COUNT_SETTING.getKey(), leaderCheckRetryCount), exp);
+                    leaderFailed(new ElasticsearchException(
+                        "node [" + leader + "] failed [" + failureCount + "] consecutive checks", exp));
                     return;
                 }

@@ -259,17 +262,28 @@ public String executor() {
             });
         }

-        void leaderFailed() {
+        void leaderFailed(Exception e) {
             if (isClosed.compareAndSet(false, true)) {
-                transportService.getThreadPool().generic().execute(onLeaderFailure);
+                transportService.getThreadPool().generic().execute(new Runnable() {
+                    @Override
+                    public void run() {
+                        onLeaderFailure.accept(e);
+                    }
+
+                    @Override
+                    public String toString() {
+                        return "notification of leader failure: " + e.getMessage();
+                    }
+                });
             } else {
                 logger.trace("already closed, not failing leader");
             }
         }

         void handleDisconnectedNode(DiscoveryNode discoveryNode) {
             if (discoveryNode.equals(leader)) {
-                leaderFailed();
+                logger.debug("leader [{}] disconnected, restarting discovery", leader);
+                leaderFailed(new NodeNotConnectedException(discoveryNode, "disconnected"));
             }
         }
```
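One subtlety in `leaderFailed`: the notification is queued on the generic thread pool wrapped in an anonymous `Runnable` that overrides `toString()`. The diff doesn't state the motivation, but a plausible reading is that anything which prints queued tasks (rejection messages, slow-task logging, assertions) then shows a useful description instead of an opaque class name. A minimal self-contained sketch of the same pattern with a plain JDK executor; the names here are illustrative, not Elasticsearch code:

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.function.Consumer;

public class LeaderFailureNotificationSketch {

    static void leaderFailed(ExecutorService executor, Consumer<Exception> onLeaderFailure, Exception e) {
        Runnable task = new Runnable() {
            @Override
            public void run() {
                onLeaderFailure.accept(e);
            }

            @Override
            public String toString() {
                return "notification of leader failure: " + e.getMessage();
            }
        };
        // anything that logs the queued task now prints a meaningful
        // description rather than something like "Sketch$1@1a2b3c"
        System.out.println("queueing task: " + task);
        executor.execute(task);
    }

    public static void main(String[] args) {
        ExecutorService generic = Executors.newSingleThreadExecutor();
        leaderFailed(generic,
            ex -> System.out.println("handling leader failure: " + ex.getMessage()),
            new RuntimeException("node [leader] failed [3] consecutive checks"));
        generic.shutdown();
    }
}
```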
The second changed file (the transport-address handshake probe; the filename isn't captured in this excerpt):

```diff
@@ -21,6 +21,7 @@
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -89,6 +90,13 @@ protected void doRun() throws Exception {
                     remoteNode = transportService.handshake(connection, probeHandshakeTimeout.millis());
                     // success means (amongst other things) that the cluster names match
                     logger.trace("[{}] handshake successful: {}", this, remoteNode);
+                } catch (Exception e) {
+                    // we opened a connection and successfully performed a low-level handshake, so we were definitely talking to an
+                    // Elasticsearch node, but the high-level handshake failed indicating some kind of mismatched configurations
+                    // (e.g. cluster name) that the user should address
+                    logger.warn(new ParameterizedMessage("handshake failed for [{}]", this), e);
```

An inline review thread is attached to the `logger.warn` line:
Contributor: Can we do this only for the handshake bit? Can we avoid logging if it is truly a connection failure, e.g. a `ConnectTransportException`?

Contributor (Author): I don't follow. As the comment says, we know that we successfully connected and performed a low-level handshake, so even a `ConnectTransportException` at this point indicates a problem that the user should address.

Contributor: I wanted to avoid having a flaky network connection cause extra verbose warnings here. I'm fine if you want to leave it this way, though.

(A sketch of the suggested alternative appears after the diff below.)
The diff then continues:

```diff
+                    listener.onFailure(e);
+                    return;
                 } finally {
                     IOUtils.closeWhileHandlingException(connection);
                 }
```
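To make the trade-off in the first review thread concrete, here is a hypothetical sketch of the reviewer's suggestion, which the PR did not adopt: keep plain connection failures quiet at DEBUG and reserve WARN for handshake-level mismatches. The exception type below is a stand-in, not the real `org.elasticsearch.transport.ConnectTransportException`:

```java
public class SelectiveWarnSketch {

    // stand-in for the real transport exception type
    static class ConnectTransportException extends RuntimeException {
        ConnectTransportException(String message) { super(message); }
    }

    static void onProbeFailure(Exception e) {
        if (e instanceof ConnectTransportException
            || e.getCause() instanceof ConnectTransportException) {
            // flaky-network case: keep it quiet
            System.out.println("DEBUG: probe connection failed: " + e.getMessage());
        } else {
            // configuration-mismatch case: surface it by default
            System.out.println("WARN: handshake failed: " + e.getMessage());
        }
    }

    public static void main(String[] args) {
        onProbeFailure(new ConnectTransportException("connect timeout"));    // DEBUG
        onProbeFailure(new IllegalStateException("cluster name mismatch")); // WARN
    }
}
```

The author's counterargument, per the code comment in the diff, is that by this point a low-level handshake has already succeeded against a real Elasticsearch node, so even connection-flavoured exceptions here point at something the operator should see.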
A second review thread concerns the `LeaderChecker` log messages above:

Contributor: "restarting discovery" does not belong in a message in this class (it assumes stuff about the calling context). Perhaps just leave that part out.

Contributor (Author): Good point, this also moved to the message in `Coordinator`. Fixed in f55db2a.
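For context on that fix, here is a hedged sketch of the division of responsibility it establishes: the checker side reports only what failed, while the consumer of the failure (the `Coordinator` in Elasticsearch) owns the "restarting discovery" wording. The method names and log format below are illustrative, not the actual code from f55db2a:

```java
import java.util.function.Consumer;

public class MessageOwnershipSketch {

    // checker side: passes the reason along without assuming what happens next
    static void notifyLeaderFailure(Consumer<Exception> onLeaderFailure, Exception cause) {
        onLeaderFailure.accept(cause);
    }

    public static void main(String[] args) {
        Consumer<Exception> coordinatorSide = e -> {
            // the caller-side message adds the context it owns
            System.out.println("WARN: leader failure, restarting discovery: " + e.getMessage());
        };
        notifyLeaderFailure(coordinatorSide, new RuntimeException("3 consecutive check failures"));
    }
}
```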