diff --git a/docs/root/configuration/health_checkers/redis.rst b/docs/root/configuration/health_checkers/redis.rst index 1859c005adb1e..37af55665b53a 100644 --- a/docs/root/configuration/health_checkers/redis.rst +++ b/docs/root/configuration/health_checkers/redis.rst @@ -7,7 +7,7 @@ The Redis health checker is a custom health checker (with :code:`envoy.health_ch which checks Redis upstream hosts. It sends a Redis PING command and expect a PONG response. The upstream Redis server can respond with anything other than PONG to cause an immediate active health check failure. Optionally, Envoy can perform EXISTS on a user-specified key. If the key does not exist it is considered a -passing healthcheck. This allows the user to mark a Redis instance for maintenance by setting the +passing health check. This allows the user to mark a Redis instance for maintenance by setting the specified :ref:`key ` to any value and waiting for traffic to drain. diff --git a/docs/root/intro/arch_overview/init.rst b/docs/root/intro/arch_overview/init.rst index 1a82a749ec1c2..af0320a57f322 100644 --- a/docs/root/intro/arch_overview/init.rst +++ b/docs/root/intro/arch_overview/init.rst @@ -11,7 +11,7 @@ accepting new connections. :ref:`CDS ` if applicable, waits for one response (or failure), and does the same primary/secondary initialization of CDS provided clusters. * If clusters use :ref:`active health checking `, Envoy also does a - single active HC round. + single active health check round. * Once cluster manager initialization is done, :ref:`RDS ` and :ref:`LDS ` initialize (if applicable). 
The server doesn't start accepting connections until there has been at least one response (or failure) for diff --git a/docs/root/intro/arch_overview/service_discovery.rst b/docs/root/intro/arch_overview/service_discovery.rst index 52ef417b4034a..f950b82b667a2 100644 --- a/docs/root/intro/arch_overview/service_discovery.rst +++ b/docs/root/intro/arch_overview/service_discovery.rst @@ -122,7 +122,7 @@ paradigm has a number of benefits: whether to route to a host: .. csv-table:: - :header: Discovery Status, HC OK, HC Failed + :header: Discovery Status, Health Check OK, Health Check Failed :widths: 1, 1, 2 Discovered, Route, Don't Route diff --git a/docs/root/operations/fs_flags.rst b/docs/root/operations/fs_flags.rst index 2f4a3b21b6682..a4c154bd207a6 100644 --- a/docs/root/operations/fs_flags.rst +++ b/docs/root/operations/fs_flags.rst @@ -10,5 +10,5 @@ in the directory specified in the :ref:`flags_path option. The currently supported flag files are: drain - If this file exists, Envoy will start in HC failing mode, similar to after the + If this file exists, Envoy will start in health check failing mode, similar to after the :http:post:`/healthcheck/fail` command has been executed. diff --git a/include/envoy/stream_info/stream_info.h b/include/envoy/stream_info/stream_info.h index d9145a2571630..14c01cc4486c8 100644 --- a/include/envoy/stream_info/stream_info.h +++ b/include/envoy/stream_info/stream_info.h @@ -260,9 +260,9 @@ class StreamInfo { virtual bool healthCheck() const PURE; /** - * @param is_hc whether the request is a health check request or not. + * @param is_health_check whether the request is a health check request or not. */ - virtual void healthCheck(bool is_hc) PURE; + virtual void healthCheck(bool is_health_check) PURE; /** * @param downstream_local_address sets the local address of the downstream connection. 
Note that diff --git a/include/envoy/upstream/health_check_host_monitor.h b/include/envoy/upstream/health_check_host_monitor.h index 53a24801a5e77..1358760550961 100644 --- a/include/envoy/upstream/health_check_host_monitor.h +++ b/include/envoy/upstream/health_check_host_monitor.h @@ -10,7 +10,8 @@ namespace Upstream { /** * A monitor for "passive" health check events that might happen on every thread. For example, if a * special HTTP header is received, the data plane may decide to fast fail a host to avoid waiting - * for the full HC interval to elapse before determining the host is active HC failed. + * for the full health check interval to elapse before determining the host is active health check + * failed. */ class HealthCheckHostMonitor { public: diff --git a/source/common/access_log/access_log_impl.h b/source/common/access_log/access_log_impl.h index 5c875a33f1770..34c77a22164f3 100644 --- a/source/common/access_log/access_log_impl.h +++ b/source/common/access_log/access_log_impl.h @@ -109,7 +109,7 @@ class OrFilter : public OperatorFilter { }; /** - * Filter out HC requests. + * Filter out health check requests.
*/ class NotHealthCheckFilter : public Filter { public: diff --git a/source/common/config/cds_json.cc b/source/common/config/cds_json.cc index a7a684a590d62..6addcc437a5d3 100644 --- a/source/common/config/cds_json.cc +++ b/source/common/config/cds_json.cc @@ -30,14 +30,14 @@ void CdsJson::translateHealthCheck(const Json::Object& json_health_check, JSON_UTIL_SET_INTEGER(json_health_check, health_check, healthy_threshold); JSON_UTIL_SET_BOOL(json_health_check, health_check, reuse_connection); - const std::string hc_type = json_health_check.getString("type"); - if (hc_type == "http") { + const std::string health_check_type = json_health_check.getString("type"); + if (health_check_type == "http") { health_check.mutable_http_health_check()->set_path(json_health_check.getString("path")); if (json_health_check.hasObject("service_name")) { health_check.mutable_http_health_check()->set_service_name( json_health_check.getString("service_name")); } - } else if (hc_type == "tcp") { + } else if (health_check_type == "tcp") { auto* tcp_health_check = health_check.mutable_tcp_health_check(); std::string send_text; for (const Json::ObjectSharedPtr& entry : json_health_check.getObjectArray("send")) { @@ -52,7 +52,7 @@ void CdsJson::translateHealthCheck(const Json::Object& json_health_check, tcp_health_check->mutable_receive()->Add()->set_text(hex_string); } } else { - ASSERT(hc_type == "redis"); + ASSERT(health_check_type == "redis"); auto* redis_health_check = health_check.mutable_custom_health_check(); redis_health_check->set_name("envoy.health_checkers.redis"); if (json_health_check.hasObject("redis_key")) { diff --git a/source/common/http/conn_manager_impl.cc b/source/common/http/conn_manager_impl.cc index 9c7e3fe220e0f..c941694efb99d 100644 --- a/source/common/http/conn_manager_impl.cc +++ b/source/common/http/conn_manager_impl.cc @@ -498,7 +498,7 @@ void ConnectionManagerImpl::ActiveStream::chargeStats(const HeaderMap& headers) uint64_t response_code = 
Utility::getResponseStatus(headers); stream_info_.response_code_ = response_code; - if (stream_info_.hc_request_) { + if (stream_info_.health_check_request_) { return; } diff --git a/source/common/stream_info/stream_info_impl.h b/source/common/stream_info/stream_info_impl.h index 2e4a49c517aba..4cf6d10c0020f 100644 --- a/source/common/stream_info/stream_info_impl.h +++ b/source/common/stream_info/stream_info_impl.h @@ -152,9 +152,9 @@ struct StreamInfoImpl : public StreamInfo { return upstream_local_address_; } - bool healthCheck() const override { return hc_request_; } + bool healthCheck() const override { return health_check_request_; } - void healthCheck(bool is_hc) override { hc_request_ = is_hc; } + void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; } void setDownstreamLocalAddress( const Network::Address::InstanceConstSharedPtr& downstream_local_address) override { @@ -218,7 +218,7 @@ struct StreamInfoImpl : public StreamInfo { absl::optional response_code_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; - bool hc_request_{}; + bool health_check_request_{}; const Router::RouteEntry* route_entry_{}; envoy::api::v2::core::Metadata metadata_{}; FilterStateImpl filter_state_{}; diff --git a/source/common/tracing/http_tracer_impl.cc b/source/common/tracing/http_tracer_impl.cc index 8128b2f7fe84d..662501a4bf382 100644 --- a/source/common/tracing/http_tracer_impl.cc +++ b/source/common/tracing/http_tracer_impl.cc @@ -55,7 +55,7 @@ const std::string& HttpTracerUtility::toString(OperationName operation_name) { Decision HttpTracerUtility::isTracing(const StreamInfo::StreamInfo& stream_info, const Http::HeaderMap& request_headers) { - // Exclude HC requests immediately. + // Exclude health check requests immediately. 
if (stream_info.healthCheck()) { return {Reason::HealthCheck, false}; } diff --git a/source/common/upstream/health_checker_impl.cc b/source/common/upstream/health_checker_impl.cc index 9b6428f764bda..2e76168e6a374 100644 --- a/source/common/upstream/health_checker_impl.cc +++ b/source/common/upstream/health_checker_impl.cc @@ -45,37 +45,37 @@ class HealthCheckerFactoryContextImpl : public Server::Configuration::HealthChec }; HealthCheckerSharedPtr -HealthCheckerFactory::create(const envoy::api::v2::core::HealthCheck& hc_config, +HealthCheckerFactory::create(const envoy::api::v2::core::HealthCheck& health_check_config, Upstream::Cluster& cluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, AccessLog::AccessLogManager& log_manager) { HealthCheckEventLoggerPtr event_logger; - if (!hc_config.event_log_path().empty()) { + if (!health_check_config.event_log_path().empty()) { event_logger = std::make_unique( - log_manager, dispatcher.timeSystem(), hc_config.event_log_path()); + log_manager, dispatcher.timeSystem(), health_check_config.event_log_path()); } - switch (hc_config.health_checker_case()) { + switch (health_check_config.health_checker_case()) { case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kHttpHealthCheck: - return std::make_shared(cluster, hc_config, dispatcher, runtime, - random, std::move(event_logger)); + return std::make_shared(cluster, health_check_config, dispatcher, + runtime, random, std::move(event_logger)); case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kTcpHealthCheck: - return std::make_shared(cluster, hc_config, dispatcher, runtime, random, - std::move(event_logger)); + return std::make_shared(cluster, health_check_config, dispatcher, runtime, + random, std::move(event_logger)); case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kGrpcHealthCheck: if (!(cluster.info()->features() & Upstream::ClusterInfo::Features::HTTP2)) { throw EnvoyException(fmt::format("{} cluster 
must support HTTP/2 for gRPC healthchecking", cluster.info()->name())); } - return std::make_shared(cluster, hc_config, dispatcher, runtime, - random, std::move(event_logger)); + return std::make_shared(cluster, health_check_config, dispatcher, + runtime, random, std::move(event_logger)); case envoy::api::v2::core::HealthCheck::HealthCheckerCase::kCustomHealthCheck: { auto& factory = Config::Utility::getAndCheckFactory( - std::string(hc_config.custom_health_check().name())); + std::string(health_check_config.custom_health_check().name())); std::unique_ptr context( new HealthCheckerFactoryContextImpl(cluster, runtime, random, dispatcher, std::move(event_logger))); - return factory.createCustomHealthChecker(hc_config, *context); + return factory.createCustomHealthChecker(health_check_config, *context); } default: // Checked by schema. diff --git a/source/common/upstream/health_checker_impl.h b/source/common/upstream/health_checker_impl.h index 67302ca989b22..48690a178de86 100644 --- a/source/common/upstream/health_checker_impl.h +++ b/source/common/upstream/health_checker_impl.h @@ -23,7 +23,7 @@ class HealthCheckerFactory : public Logger::Loggable public: /** * Create a health checker. - * @param hc_config supplies the health check proto. + * @param health_check_config supplies the health check proto. * @param cluster supplies the owning cluster. * @param runtime supplies the runtime loader. * @param random supplies the random generator. @@ -31,7 +31,7 @@ class HealthCheckerFactory : public Logger::Loggable * @param event_logger supplies the event_logger. * @return a health checker. 
*/ - static HealthCheckerSharedPtr create(const envoy::api::v2::core::HealthCheck& hc_config, + static HealthCheckerSharedPtr create(const envoy::api::v2::core::HealthCheck& health_check_config, Upstream::Cluster& cluster, Runtime::Loader& runtime, Runtime::RandomGenerator& random, Event::Dispatcher& dispatcher, diff --git a/source/common/upstream/upstream_impl.cc b/source/common/upstream/upstream_impl.cc index 8bd40b96997d8..3229e67a74464 100644 --- a/source/common/upstream/upstream_impl.cc +++ b/source/common/upstream/upstream_impl.cc @@ -749,10 +749,10 @@ void ClusterImplBase::setOutlierDetector(const Outlier::DetectorSharedPtr& outli } void ClusterImplBase::reloadHealthyHosts() { - // Every time a host changes HC state we cause a full healthy host recalculation which + // Every time a host changes health check state we cause a full healthy host recalculation which // for expensive LBs (ring, subset, etc.) can be quite time consuming. During startup, this // can also block worker threads by doing this repeatedly. There is no reason to do this - // as we will not start taking traffic until we are initialized. By blocking HC updates + // as we will not start taking traffic until we are initialized. By blocking health check updates // while initializing we can avoid this. if (initialization_complete_callback_ != nullptr) { return; diff --git a/source/extensions/health_checkers/redis/redis.h b/source/extensions/health_checkers/redis/redis.h index 5f3c42770d3dc..7de0ddb682519 100644 --- a/source/extensions/health_checkers/redis/redis.h +++ b/source/extensions/health_checkers/redis/redis.h @@ -54,7 +54,7 @@ class RedisHealthChecker : public Upstream::HealthCheckerImplBase { // Extensions::NetworkFilters::RedisProxy::ConnPool::Config bool disableOutlierEvents() const override { return true; } std::chrono::milliseconds opTimeout() const override { - // Allow the main HC infra to control timeout. + // Allow the main health check infra to control timeout.
return parent_.timeout_ * 2; } diff --git a/source/extensions/health_checkers/redis/utility.h b/source/extensions/health_checkers/redis/utility.h index 40fe9bcaf8eb6..dd012c0b8f52b 100644 --- a/source/extensions/health_checkers/redis/utility.h +++ b/source/extensions/health_checkers/redis/utility.h @@ -14,10 +14,10 @@ namespace RedisHealthChecker { namespace { static const envoy::config::health_checker::redis::v2::Redis -getRedisHealthCheckConfig(const envoy::api::v2::core::HealthCheck& hc_config) { +getRedisHealthCheckConfig(const envoy::api::v2::core::HealthCheck& health_check_config) { ProtobufTypes::MessagePtr config = ProtobufTypes::MessagePtr{new envoy::config::health_checker::redis::v2::Redis()}; - MessageUtil::jsonConvert(hc_config.custom_health_check().config(), *config); + MessageUtil::jsonConvert(health_check_config.custom_health_check().config(), *config); return MessageUtil::downcastAndValidate( *config); } diff --git a/source/server/drain_manager_impl.cc b/source/server/drain_manager_impl.cc index 2e5c468b25881..e7ca1852f57c8 100644 --- a/source/server/drain_manager_impl.cc +++ b/source/server/drain_manager_impl.cc @@ -18,7 +18,7 @@ DrainManagerImpl::DrainManagerImpl(Instance& server, envoy::api::v2::Listener::D : server_(server), drain_type_(drain_type) {} bool DrainManagerImpl::drainClose() const { - // If we are actively HC failed and the drain type is default, always drain close. + // If we are actively health check failed and the drain type is default, always drain close. // // TODO(mattklein123): In relation to x-envoy-immediate-health-check-fail, it would be better // if even in the case of server health check failure we had some period of drain ramp up. 
This diff --git a/test/common/access_log/access_log_impl_test.cc b/test/common/access_log/access_log_impl_test.cc index f41b113d5c308..378617adb5bac 100644 --- a/test/common/access_log/access_log_impl_test.cc +++ b/test/common/access_log/access_log_impl_test.cc @@ -351,7 +351,7 @@ TEST_F(AccessLogImplTest, healthCheckTrue) { InstanceSharedPtr log = AccessLogFactory::fromProto(parseAccessLogFromJson(json), context_); Http::TestHeaderMapImpl header_map{}; - stream_info_.hc_request_ = true; + stream_info_.health_check_request_ = true; EXPECT_CALL(*file_, write(_)).Times(0); log->log(&header_map, &response_headers_, &response_trailers_, stream_info_); @@ -464,7 +464,7 @@ TEST_F(AccessLogImplTest, andFilter) { { EXPECT_CALL(*file_, write(_)).Times(0); Http::TestHeaderMapImpl header_map{}; - stream_info_.hc_request_ = true; + stream_info_.health_check_request_ = true; log->log(&header_map, &response_headers_, &response_trailers_, stream_info_); } } @@ -527,7 +527,7 @@ TEST_F(AccessLogImplTest, multipleOperators) { { EXPECT_CALL(*file_, write(_)).Times(0); Http::TestHeaderMapImpl header_map{}; - stream_info_.hc_request_ = true; + stream_info_.health_check_request_ = true; log->log(&header_map, &response_headers_, &response_trailers_, stream_info_); } diff --git a/test/common/stream_info/test_util.h b/test/common/stream_info/test_util.h index 11592c29f65fd..f708c9e4f470d 100644 --- a/test/common/stream_info/test_util.h +++ b/test/common/stream_info/test_util.h @@ -54,8 +54,8 @@ class TestStreamInfo : public StreamInfo::StreamInfo { const Network::Address::InstanceConstSharedPtr& upstreamLocalAddress() const override { return upstream_local_address_; } - bool healthCheck() const override { return hc_request_; } - void healthCheck(bool is_hc) override { hc_request_ = is_hc; } + bool healthCheck() const override { return health_check_request_; } + void healthCheck(bool is_health_check) override { health_check_request_ = is_health_check; } void setDownstreamLocalAddress( const 
Network::Address::InstanceConstSharedPtr& downstream_local_address) override { @@ -194,7 +194,7 @@ class TestStreamInfo : public StreamInfo::StreamInfo { absl::optional response_code_; uint64_t response_flags_{}; Upstream::HostDescriptionConstSharedPtr upstream_host_{}; - bool hc_request_{}; + bool health_check_request_{}; Network::Address::InstanceConstSharedPtr upstream_local_address_; Network::Address::InstanceConstSharedPtr downstream_local_address_; Network::Address::InstanceConstSharedPtr downstream_direct_remote_address_; diff --git a/test/common/tracing/http_tracer_impl_test.cc b/test/common/tracing/http_tracer_impl_test.cc index 99e07658df811..ac5ad1a0ddc90 100644 --- a/test/common/tracing/http_tracer_impl_test.cc +++ b/test/common/tracing/http_tracer_impl_test.cc @@ -72,7 +72,7 @@ TEST(HttpTracerUtilityTest, IsTracing) { EXPECT_TRUE(result.traced); } - // HC request. + // Health check request. { Http::TestHeaderMapImpl traceable_header_hc{{"x-request-id", forced_guid}}; EXPECT_CALL(stream_info, healthCheck()).WillOnce(Return(true)); diff --git a/test/extensions/health_checkers/redis/redis_test.cc b/test/extensions/health_checkers/redis/redis_test.cc index 4e4f119c75c00..1fa714c68d90a 100644 --- a/test/extensions/health_checkers/redis/redis_test.cc +++ b/test/extensions/health_checkers/redis/redis_test.cc @@ -45,12 +45,12 @@ class RedisHealthCheckerTest config: )EOF"; - const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml); - const auto& redis_config = getRedisHealthCheckConfig(hc_config); + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig(health_check_config); health_checker_.reset( - new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); + new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_, + random_,
Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); } void setupAlwaysLogHealthCheckFailures() { @@ -67,12 +67,12 @@ class RedisHealthCheckerTest config: )EOF"; - const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml); - const auto& redis_config = getRedisHealthCheckConfig(hc_config); + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig(health_check_config); health_checker_.reset( - new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); + new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_, + random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); } void setupExistsHealthcheck() { @@ -89,12 +89,12 @@ class RedisHealthCheckerTest key: foo )EOF"; - const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml); - const auto& redis_config = getRedisHealthCheckConfig(hc_config); + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig(health_check_config); health_checker_.reset( - new RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); + new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_, + random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); } void setupDontReuseConnection() { @@ -111,12 +111,12 @@ class RedisHealthCheckerTest config: )EOF"; - const auto& hc_config = Upstream::parseHealthCheckFromV2Yaml(yaml); - const auto& redis_config = getRedisHealthCheckConfig(hc_config); + const auto& health_check_config = Upstream::parseHealthCheckFromV2Yaml(yaml); + const auto& redis_config = getRedisHealthCheckConfig(health_check_config); health_checker_.reset( - new 
RedisHealthChecker(*cluster_, hc_config, redis_config, dispatcher_, runtime_, random_, - Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); + new RedisHealthChecker(*cluster_, health_check_config, redis_config, dispatcher_, runtime_, + random_, Upstream::HealthCheckEventLoggerPtr(event_logger_), *this)); } Extensions::NetworkFilters::RedisProxy::ConnPool::ClientPtr diff --git a/test/integration/hds_integration_test.cc b/test/integration/hds_integration_test.cc index 702d731ae6d89..bd3d6de7607a9 100644 --- a/test/integration/hds_integration_test.cc +++ b/test/integration/hds_integration_test.cc @@ -167,9 +167,9 @@ class HdsIntegrationTest : public HttpIntegrationTest, health_check->mutable_health_checks(0)->mutable_interval()->set_seconds(MaxTimeout); health_check->mutable_health_checks(0)->mutable_unhealthy_threshold()->set_value(2); health_check->mutable_health_checks(0)->mutable_healthy_threshold()->set_value(2); - auto* tcp_hc = health_check->mutable_health_checks(0)->mutable_tcp_health_check(); - tcp_hc->mutable_send()->set_text("50696E67"); - tcp_hc->add_receive()->set_text("506F6E67"); + auto* tcp_health_check = health_check->mutable_health_checks(0)->mutable_tcp_health_check(); + tcp_health_check->mutable_send()->set_text("50696E67"); + tcp_health_check->add_receive()->set_text("506F6E67"); return server_health_check_specifier_; } diff --git a/test/mocks/stream_info/mocks.h b/test/mocks/stream_info/mocks.h index a6f46d09b8975..ece9ad8f93419 100644 --- a/test/mocks/stream_info/mocks.h +++ b/test/mocks/stream_info/mocks.h @@ -52,7 +52,7 @@ class MockStreamInfo : public StreamInfo { MOCK_METHOD1(setUpstreamLocalAddress, void(const Network::Address::InstanceConstSharedPtr&)); MOCK_CONST_METHOD0(upstreamLocalAddress, const Network::Address::InstanceConstSharedPtr&()); MOCK_CONST_METHOD0(healthCheck, bool()); - MOCK_METHOD1(healthCheck, void(bool is_hc)); + MOCK_METHOD1(healthCheck, void(bool is_health_check)); MOCK_METHOD1(setDownstreamLocalAddress, 
void(const Network::Address::InstanceConstSharedPtr&)); MOCK_CONST_METHOD0(downstreamLocalAddress, const Network::Address::InstanceConstSharedPtr&()); MOCK_METHOD1(setDownstreamDirectRemoteAddress,