Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion docs/root/configuration/health_checkers/redis.rst
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ The Redis health checker is a custom health checker (with :code:`envoy.health_ch
which checks Redis upstream hosts. It sends a Redis PING command and expects a PONG response. The upstream
Redis server can respond with anything other than PONG to cause an immediate active health check failure.
Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a
passing healthcheck. This allows the user to mark a Redis instance for maintenance by setting the
passing health check. This allows the user to mark a Redis instance for maintenance by setting the
specified :ref:`key <envoy_api_field_config.health_checker.redis.v2.Redis.key>` to any value and waiting
for traffic to drain.

Expand Down
2 changes: 1 addition & 1 deletion docs/root/intro/arch_overview/init.rst
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ accepting new connections.
:ref:`CDS <arch_overview_dynamic_config_cds>` if applicable, waits for one response (or failure),
and does the same primary/secondary initialization of CDS provided clusters.
* If clusters use :ref:`active health checking <arch_overview_health_checking>`, Envoy also does a
single active HC round.
single active health check round.
* Once cluster manager initialization is done, :ref:`RDS <arch_overview_dynamic_config_rds>` and
:ref:`LDS <arch_overview_dynamic_config_lds>` initialize (if applicable). The server
doesn't start accepting connections until there has been at least one response (or failure) for
Expand Down
2 changes: 1 addition & 1 deletion docs/root/intro/arch_overview/service_discovery.rst
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,7 @@ paradigm has a number of benefits:
whether to route to a host:

.. csv-table::
:header: Discovery Status, HC OK, HC Failed
:header: Discovery Status, Health Check OK, Health Check Failed
:widths: 1, 1, 2

Discovered, Route, Don't Route
Expand Down
2 changes: 1 addition & 1 deletion docs/root/operations/fs_flags.rst
Original file line number Diff line number Diff line change
Expand Up @@ -10,5 +10,5 @@ in the directory specified in the :ref:`flags_path
option. The currently supported flag files are:

drain
If this file exists, Envoy will start in HC failing mode, similar to after the
If this file exists, Envoy will start in health check failing mode, similar to after the
:http:post:`/healthcheck/fail` command has been executed.
4 changes: 2 additions & 2 deletions include/envoy/stream_info/stream_info.h
Original file line number Diff line number Diff line change
Expand Up @@ -260,9 +260,9 @@ class StreamInfo {
virtual bool healthCheck() const PURE;

/**
* @param is_hc whether the request is a health check request or not.
* @param is_health_check whether the request is a health check request or not.
*/
virtual void healthCheck(bool is_hc) PURE;
virtual void healthCheck(bool is_health_check) PURE;

/**
* @param downstream_local_address sets the local address of the downstream connection. Note that
Expand Down
3 changes: 2 additions & 1 deletion include/envoy/upstream/health_check_host_monitor.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@ namespace Upstream {
/**
* A monitor for "passive" health check events that might happen on every thread. For example, if a
* special HTTP header is received, the data plane may decide to fast fail a host to avoid waiting
* for the full HC interval to elapse before determining the host is active HC failed.
* for the full health check interval to elapse before determining the host is active health check
* failed.
*/
class HealthCheckHostMonitor {
public:
Expand Down
2 changes: 1 addition & 1 deletion source/common/access_log/access_log_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ class OrFilter : public OperatorFilter {
};

/**
* Filter out HC requests.
* Filter out health check requests.
*/
class NotHealthCheckFilter : public Filter {
public:
Expand Down
8 changes: 4 additions & 4 deletions source/common/config/cds_json.cc
Original file line number Diff line number Diff line change
Expand Up @@ -30,14 +30,14 @@ void CdsJson::translateHealthCheck(const Json::Object& json_health_check,
JSON_UTIL_SET_INTEGER(json_health_check, health_check, healthy_threshold);
JSON_UTIL_SET_BOOL(json_health_check, health_check, reuse_connection);

const std::string hc_type = json_health_check.getString("type");
if (hc_type == "http") {
const std::string health_check_type = json_health_check.getString("type");
if (health_check_type == "http") {
health_check.mutable_http_health_check()->set_path(json_health_check.getString("path"));
if (json_health_check.hasObject("service_name")) {
health_check.mutable_http_health_check()->set_service_name(
json_health_check.getString("service_name"));
}
} else if (hc_type == "tcp") {
} else if (health_check_type == "tcp") {
auto* tcp_health_check = health_check.mutable_tcp_health_check();
std::string send_text;
for (const Json::ObjectSharedPtr& entry : json_health_check.getObjectArray("send")) {
Expand All @@ -52,7 +52,7 @@ void CdsJson::translateHealthCheck(const Json::Object& json_health_check,
tcp_health_check->mutable_receive()->Add()->set_text(hex_string);
}
} else {
ASSERT(hc_type == "redis");
ASSERT(health_check_type == "redis");
auto* redis_health_check = health_check.mutable_custom_health_check();
redis_health_check->set_name("envoy.health_checkers.redis");
if (json_health_check.hasObject("redis_key")) {
Expand Down
2 changes: 1 addition & 1 deletion source/common/http/conn_manager_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -498,7 +498,7 @@ void ConnectionManagerImpl::ActiveStream::chargeStats(const HeaderMap& headers)
uint64_t response_code = Utility::getResponseStatus(headers);
stream_info_.response_code_ = response_code;

if (stream_info_.hc_request_) {
if (stream_info_.health_check_request_) {
return;
}

Expand Down
6 changes: 3 additions & 3 deletions source/common/stream_info/stream_info_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -152,9 +152,9 @@ struct StreamInfoImpl : public StreamInfo {
return upstream_local_address_;
}

bool healthCheck() const override { return hc_request_; }
bool healthCheck() const override { return health_check_request_; }

void healthCheck(bool is_hc) override { hc_request_ = is_hc; }
void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; }

void setDownstreamLocalAddress(
const Network::Address::InstanceConstSharedPtr& downstream_local_address) override {
Expand Down Expand Up @@ -218,7 +218,7 @@ struct StreamInfoImpl : public StreamInfo {
absl::optional<uint32_t> response_code_;
uint64_t response_flags_{};
Upstream::HostDescriptionConstSharedPtr upstream_host_{};
bool hc_request_{};
bool health_check_request_{};
const Router::RouteEntry* route_entry_{};
envoy::api::v2::core::Metadata metadata_{};
FilterStateImpl filter_state_{};
Expand Down
2 changes: 1 addition & 1 deletion source/common/tracing/http_tracer_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ const std::string& HttpTracerUtility::toString(OperationName operation_name) {

Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info,
const Http::HeaderMap& request_headers) {
// Exclude HC requests immediately.
// Exclude health check requests immediately.
if (stream_info.healthCheck()) {
return {Reason::HealthCheck, false};
}
Expand Down
24 changes: 12 additions & 12 deletions source/common/upstream/health_checker_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,37 +45,37 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec
};

HealthCheckerSharedPtr
HealthCheckerFactory::create(const envoy::api::v2::core::HealthCheck& hc_config,
HealthCheckerFactory::create(const envoy::api::v2::core::HealthCheck& health_check_config,
Upstream::Cluster& cluster, Runtime::Loader& runtime,
Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher,
AccessLog::AccessLogManager& log_manager) {
HealthCheckEventLoggerPtr event_logger;
if (!hc_config.event_log_path().empty()) {
if (!health_check_config.event_log_path().empty()) {
event_logger = std::make_unique<HealthCheckEventLoggerImpl>(
log_manager, dispatcher.timeSystem(), hc_config.event_log_path());
log_manager, dispatcher.timeSystem(), health_check_config.event_log_path());
}
switch (hc_config.health_checker_case()) {
switch (health_check_config.health_checker_case()) {
case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kHttpHealthCheck:
return std::make_shared<ProdHttpHealthCheckerImpl>(cluster, hc_config, dispatcher, runtime,
random, std::move(event_logger));
return std::make_shared<ProdHttpHealthCheckerImpl>(cluster, health_check_config, dispatcher,
runtime, random, std::move(event_logger));
case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kTcpHealthCheck:
return std::make_shared<TcpHealthCheckerImpl>(cluster, hc_config, dispatcher, runtime, random,
std::move(event_logger));
return std::make_shared<TcpHealthCheckerImpl>(cluster, health_check_config, dispatcher, runtime,
random, std::move(event_logger));
case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kGrpcHealthCheck:
if (!(cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2)) {
throw EnvoyException(fmt::format("{} cluster must support HTTP/2 for gRPC healthchecking",
cluster.info()->name()));
}
return std::make_shared<ProdGrpcHealthCheckerImpl>(cluster, hc_config, dispatcher, runtime,
random, std::move(event_logger));
return std::make_shared<ProdGrpcHealthCheckerImpl>(cluster, health_check_config, dispatcher,
runtime, random, std::move(event_logger));
case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kCustomHealthCheck: {
auto& factory =
Config::Utility::getAndCheckFactory<Server::Configuration::CustomHealthCheckerFactory>(
std::string(hc_config.custom_health_check().name()));
std::string(health_check_config.custom_health_check().name()));
std::unique_ptr<Server::Configuration::HealthCheckerFactoryContext> context(
new HealthCheckerFactoryContextImpl(cluster, runtime, random, dispatcher,
std::move(event_logger)));
return factory.createCustomHealthChecker(hc_config, *context);
return factory.createCustomHealthChecker(health_check_config, *context);
}
default:
// Checked by schema.
Expand Down
4 changes: 2 additions & 2 deletions source/common/upstream/health_checker_impl.h
Original file line number Diff line number Diff line change
Expand Up @@ -23,15 +23,15 @@ class HealthCheckerFactory : public Logger::Loggable<Logger::Id::health_checker>
public:
/**
* Create a health checker.
* @param hc_config supplies the health check proto.
* @param health_check_config supplies the health check proto.
* @param cluster supplies the owning cluster.
* @param runtime supplies the runtime loader.
* @param random supplies the random generator.
* @param dispatcher supplies the dispatcher.
* @param event_logger supplies the event_logger.
* @return a health checker.
*/
static HealthCheckerSharedPtr create(const envoy::api::v2::core::HealthCheck& hc_config,
static HealthCheckerSharedPtr create(const envoy::api::v2::core::HealthCheck& health_check_config,
Upstream::Cluster& cluster, Runtime::Loader& runtime,
Runtime::RandomGenerator& random,
Event::Dispatcher& dispatcher,
Expand Down
4 changes: 2 additions & 2 deletions source/common/upstream/upstream_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -749,10 +749,10 @@ void ClusterImplBase::setOutlierDetector(const Outlier::DetectorSharedPtr& outli
}

void ClusterImplBase::reloadHealthyHosts() {
// Every time a host changes HC state we cause a full healthy host recalculation which
// Every time a host changes health check state we cause a full healthy host recalculation which
// for expensive LBs (ring, subset, etc.) can be quite time consuming. During startup, this
// can also block worker threads by doing this repeatedly. There is no reason to do this
// as we will not start taking traffic until we are initialized. By blocking HC updates
// as we will not start taking traffic until we are initialized. By blocking health check updates
// while initializing we can avoid this.
if (initialization_complete_callback_ != nullptr) {
return;
Expand Down
2 changes: 1 addition & 1 deletion source/extensions/health_checkers/redis/redis.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase {
// Extensions::NetworkFilters::RedisProxy::ConnPool::Config
bool disableOutlierEvents() const override { return true; }
std::chrono::milliseconds opTimeout() const override {
// Allow the main HC infra to control timeout.
// Allow the main health check infra to control timeout.
return parent_.timeout_ * 2;
}

Expand Down
4 changes: 2 additions & 2 deletions source/extensions/health_checkers/redis/utility.h
Original file line number Diff line number Diff line change
Expand Up @@ -14,10 +14,10 @@ namespace RedisHealthChecker {
namespace {

static const envoy::config::health_checker::redis::v2::Redis
getRedisHealthCheckConfig(const envoy::api::v2::core::HealthCheck& hc_config) {
getRedisHealthCheckConfig(const envoy::api::v2::core::HealthCheck& health_check_config) {
ProtobufTypes::MessagePtr config =
ProtobufTypes::MessagePtr{new envoy::config::health_checker::redis::v2::Redis()};
MessageUtil::jsonConvert(hc_config.custom_health_check().config(), *config);
MessageUtil::jsonConvert(health_check_config.custom_health_check().config(), *config);
return MessageUtil::downcastAndValidate<const envoy::config::health_checker::redis::v2::Redis&>(
*config);
}
Expand Down
2 changes: 1 addition & 1 deletion source/server/drain_manager_impl.cc
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@ DrainManagerImpl::DrainManagerImpl(Instance& server, envoy::api::v2::Listener::D
: server_(server), drain_type_(drain_type) {}

bool DrainManagerImpl::drainClose() const {
// If we are actively HC failed and the drain type is default, always drain close.
// If we are actively health check failed and the drain type is default, always drain close.
//
// TODO(mattklein123): In relation to x-envoy-immediate-health-check-fail, it would be better
// if even in the case of server health check failure we had some period of drain ramp up. This
Expand Down
6 changes: 3 additions & 3 deletions test/common/access_log/access_log_impl_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -351,7 +351,7 @@ TEST_F(AccessLogImplTest, healthCheckTrue) {
InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromJson(json), context_);

Http::TestHeaderMapImpl header_map{};
stream_info_.hc_request_ = true;
stream_info_.health_check_request_ = true;
EXPECT_CALL(*file_, write(_)).Times(0);

log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);
Expand Down Expand Up @@ -464,7 +464,7 @@ TEST_F(AccessLogImplTest, andFilter) {
{
EXPECT_CALL(*file_, write(_)).Times(0);
Http::TestHeaderMapImpl header_map{};
stream_info_.hc_request_ = true;
stream_info_.health_check_request_ = true;
log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);
}
}
Expand Down Expand Up @@ -527,7 +527,7 @@ TEST_F(AccessLogImplTest, multipleOperators) {
{
EXPECT_CALL(*file_, write(_)).Times(0);
Http::TestHeaderMapImpl header_map{};
stream_info_.hc_request_ = true;
stream_info_.health_check_request_ = true;

log->log(&header_map, &response_headers_, &response_trailers_, stream_info_);
}
Expand Down
6 changes: 3 additions & 3 deletions test/common/stream_info/test_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -54,8 +54,8 @@ class TestStreamInfo : public StreamInfo::StreamInfo {
const Network::Address::InstanceConstSharedPtr& upstreamLocalAddress() const override {
return upstream_local_address_;
}
bool healthCheck() const override { return hc_request_; }
void healthCheck(bool is_hc) override { hc_request_ = is_hc; }
bool healthCheck() const override { return health_check_request_; }
void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; }

void setDownstreamLocalAddress(
const Network::Address::InstanceConstSharedPtr& downstream_local_address) override {
Expand Down Expand Up @@ -194,7 +194,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo {
absl::optional<uint32_t> response_code_;
uint64_t response_flags_{};
Upstream::HostDescriptionConstSharedPtr upstream_host_{};
bool hc_request_{};
bool health_check_request_{};
Network::Address::InstanceConstSharedPtr upstream_local_address_;
Network::Address::InstanceConstSharedPtr downstream_local_address_;
Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_;
Expand Down
2 changes: 1 addition & 1 deletion test/common/tracing/http_tracer_impl_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ TEST(HttpTracerUtilityTest, IsTracing) {
EXPECT_TRUE(result.traced);
}

// HC request.
// Health check request.
{
Http::TestHeaderMapImpl traceable_header_hc{{"x-request-id", forced_guid}};
EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true));
Expand Down
32 changes: 16 additions & 16 deletions test/extensions/health_checkers/redis/redis_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -45,12 +45,12 @@ class RedisHealthCheckerTest
config:
)EOF";

const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(hc_config);
const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(health_check_config);

health_checker_.reset(
new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_,
Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_,
random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
}

void setupAlwaysLogHealthCheckFailures() {
Expand All @@ -67,12 +67,12 @@ class RedisHealthCheckerTest
config:
)EOF";

const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(hc_config);
const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(health_check_config);

health_checker_.reset(
new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_,
Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_,
random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
}

void setupExistsHealthcheck() {
Expand All @@ -89,12 +89,12 @@ class RedisHealthCheckerTest
key: foo
)EOF";

const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(hc_config);
const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(health_check_config);

health_checker_.reset(
new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_,
Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_,
random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
}

void setupDontReuseConnection() {
Expand All @@ -111,12 +111,12 @@ class RedisHealthCheckerTest
config:
)EOF";

const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(hc_config);
const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml);
const auto& redis_config = getRedisHealthCheckConfig(health_check_config);

health_checker_.reset(
new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_,
Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_,
random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this));
}

Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr
Expand Down
6 changes: 3 additions & 3 deletions test/integration/hds_integration_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -167,9 +167,9 @@ class HdsIntegrationTest : public HttpIntegrationTest,
health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout);
health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2);
health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2);
auto* tcp_hc = health_check->mutable_health_checks(0)->mutable_tcp_health_check();
tcp_hc->mutable_send()->set_text("50696E67");
tcp_hc->add_receive()->set_text("506F6E67");
auto* tcp_health_check = health_check->mutable_health_checks(0)->mutable_tcp_health_check();
tcp_health_check->mutable_send()->set_text("50696E67");
tcp_health_check->add_receive()->set_text("506F6E67");

return server_health_check_specifier_;
}
Expand Down
Loading