diff --git a/Makefile b/Makefile index 667ee3fb0bb..bf1fe3d6e36 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,9 @@ ARTIFACTS_DIR ?= $(LOCAL_ARTIFACTS_DIR) BAZEL_STARTUP_ARGS ?= BAZEL_BUILD_ARGS ?= BAZEL_TEST_ARGS ?= +BAZEL_TARGETS ?= //... +# Some tests run so slowly under the sanitizers that they always timeout. +SANITIZER_EXCLUSIONS ?= -test/integration:mixer_fault_test HUB ?= TAG ?= ifeq "$(origin CC)" "default" @@ -31,7 +34,7 @@ endif PATH := /usr/lib/llvm-7/bin:$(PATH) build: - PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) build $(BAZEL_BUILD_ARGS) //... + PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) build $(BAZEL_BUILD_ARGS) $(BAZEL_TARGETS) @bazel shutdown # Build only envoy - fast @@ -44,15 +47,15 @@ clean: @bazel shutdown test: - PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) //... + PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) $(BAZEL_TARGETS) @bazel shutdown test_asan: - PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) --config=clang-asan //... + PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) --config=clang-asan -- $(BAZEL_TARGETS) $(SANITIZER_EXCLUSIONS) @bazel shutdown test_tsan: - PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) --config=clang-tsan //... 
+ PATH=$(PATH) CC=$(CC) CXX=$(CXX) bazel $(BAZEL_STARTUP_ARGS) test $(BAZEL_TEST_ARGS) --config=clang-tsan -- $(BAZEL_TARGETS) $(SANITIZER_EXCLUSIONS) @bazel shutdown check: diff --git a/OWNERS b/OWNERS index 41f4a1c42d1..838b4632335 100644 --- a/OWNERS +++ b/OWNERS @@ -7,6 +7,7 @@ reviewers: - JimmyCYJ - venilnoronha - kyessenov + - duderino approvers: - qiwzhang - lizan @@ -16,3 +17,4 @@ approvers: - JimmyCYJ - venilnoronha - kyessenov + - duderino diff --git a/include/istio/control/http/request_handler.h b/include/istio/control/http/request_handler.h index 916b6191dc0..f2f0369a092 100644 --- a/include/istio/control/http/request_handler.h +++ b/include/istio/control/http/request_handler.h @@ -35,10 +35,13 @@ class RequestHandler { // * extract attributes from the config. // * if necessary, forward some attributes to downstream // * make a Check call. - virtual ::istio::mixerclient::CancelFunc Check( - CheckData* check_data, HeaderUpdate* header_update, - ::istio::mixerclient::TransportCheckFunc transport, - ::istio::mixerclient::CheckDoneFunc on_done) = 0; + virtual void Check(CheckData* check_data, HeaderUpdate* header_update, + const ::istio::mixerclient::TransportCheckFunc& transport, + const ::istio::mixerclient::CheckDoneFunc& on_done) = 0; + + virtual void ResetCancel() = 0; + + virtual void CancelCheck() = 0; // Make a Report call. It will: // * check service config to see if Report is required diff --git a/include/istio/control/tcp/request_handler.h b/include/istio/control/tcp/request_handler.h index 2fb0bedf60a..8c415daa22a 100644 --- a/include/istio/control/tcp/request_handler.h +++ b/include/istio/control/tcp/request_handler.h @@ -32,8 +32,12 @@ class RequestHandler { // Perform a Check call. It will: // * extract downstream tcp connection attributes // * check config, make a Check call if necessary. 
- virtual ::istio::mixerclient::CancelFunc Check( - CheckData* check_data, ::istio::mixerclient::CheckDoneFunc on_done) = 0; + virtual void Check(CheckData* check_data, + const ::istio::mixerclient::CheckDoneFunc& on_done) = 0; + + virtual void ResetCancel() = 0; + + virtual void CancelCheck() = 0; // Make report call. virtual void Report(ReportData* report_data, diff --git a/include/istio/mixerclient/check_response.h b/include/istio/mixerclient/check_response.h index 86000628b1e..82928d0c06f 100644 --- a/include/istio/mixerclient/check_response.h +++ b/include/istio/mixerclient/check_response.h @@ -22,21 +22,15 @@ namespace istio { namespace mixerclient { -// The CheckResponseInfo holds response information in detail. -struct CheckResponseInfo { - // Whether this check response is from cache. - bool is_check_cache_hit{false}; +// The CheckResponseInfo exposes policy and quota check details to the check +// callbacks. +class CheckResponseInfo { + public: + virtual ~CheckResponseInfo(){}; - // Whether this quota response is from cache. - bool is_quota_cache_hit{false}; + virtual const ::google::protobuf::util::Status& status() const = 0; - // The check and quota response status. 
- ::google::protobuf::util::Status response_status{ - ::google::protobuf::util::Status::UNKNOWN}; - - // Routing directive (applicable if the status is OK) - ::istio::mixer::v1::RouteDirective route_directive{ - ::istio::mixer::v1::RouteDirective::default_instance()}; + virtual const ::istio::mixer::v1::RouteDirective& routeDirective() const = 0; }; } // namespace mixerclient diff --git a/include/istio/mixerclient/client.h b/include/istio/mixerclient/client.h index d869a1144f0..648c5588636 100644 --- a/include/istio/mixerclient/client.h +++ b/include/istio/mixerclient/client.h @@ -19,6 +19,8 @@ #include "environment.h" #include "include/istio/quota_config/requirement.h" #include "options.h" +#include "src/istio/mixerclient/check_context.h" +#include "src/istio/mixerclient/shared_attributes.h" #include @@ -50,24 +52,79 @@ struct MixerClientOptions { // The statistics recorded by mixerclient library. struct Statistics { - // Total number of check calls. - uint64_t total_check_calls; - // Total number of remote check calls. - uint64_t total_remote_check_calls; - // Total number of remote check calls that blocking origin requests. - uint64_t total_blocking_remote_check_calls; - - // Total number of quota calls. - uint64_t total_quota_calls; - // Total number of remote quota calls. - uint64_t total_remote_quota_calls; - // Total number of remote quota calls that blocking origin requests. - uint64_t total_blocking_remote_quota_calls; + // + // Policy check counters. 
+ // + // total_check_calls = total_check_hits + total_check_misses + // total_check_hits = total_check_hit_accepts + total_check_hit_denies + // total_remote_check_calls = total_check_misses + // total_remote_check_calls >= total_remote_check_accepts + + // total_remote_check_denies + // ^ Transport errors are responsible for the >= + // + + uint64_t total_check_calls_{0}; // 1.0 + uint64_t total_check_cache_hits_{0}; // 1.1 + uint64_t total_check_cache_misses_{0}; // 1.1 + uint64_t total_check_cache_hit_accepts_{0}; // 1.1 + uint64_t total_check_cache_hit_denies_{0}; // 1.1 + uint64_t total_remote_check_calls_{0}; // 1.0 + uint64_t total_remote_check_accepts_{0}; // 1.1 + uint64_t total_remote_check_denies_{0}; // 1.1 + + // + // Quota check counters + // + // total_quota_calls = total_quota_hits + total_quota_misses + // total_quota_hits = total_quota_hit_accepts + total_quota_hit_denies + // total_remote_quota_calls = total_quota_misses + + // total_remote_quota_prefetch_calls total_remote_quota_calls >= + // total_remote_quota_accepts + total_remote_quota_denies + // ^ Transport errors are responsible for the >= + // + + uint64_t total_quota_calls_{0}; // 1.0 + uint64_t total_quota_cache_hits_{0}; // 1.1 + uint64_t total_quota_cache_misses_{0}; // 1.1 + uint64_t total_quota_cache_hit_accepts_{0}; // 1.1 + uint64_t total_quota_cache_hit_denies_{0}; // 1.1 + uint64_t total_remote_quota_calls_{0}; // 1.0 + uint64_t total_remote_quota_accepts_{0}; // 1.1 + uint64_t total_remote_quota_denies_{0}; // 1.1 + uint64_t total_remote_quota_prefetch_calls_{0}; // 1.1 + + // + // Counters for upstream requests to Mixer. + // + // total_remote_calls = SUM(total_remote_call_successes, ..., + // total_remote_call_other_errors) Total transport errors would be + // (total_remote_calls - total_remote_call_successes). 
+ // + + uint64_t total_remote_calls_{0}; // 1.1 + uint64_t total_remote_call_successes_{0}; // 1.1 + uint64_t total_remote_call_timeouts_{0}; // 1.1 + uint64_t total_remote_call_send_errors_{0}; // 1.1 + uint64_t total_remote_call_other_errors_{0}; // 1.1 + uint64_t total_remote_call_retries_{0}; // 1.1 + uint64_t total_remote_call_cancellations_{0}; // 1.1 + + // + // Telemetry report counters + // // Total number of report calls. - uint64_t total_report_calls; + uint64_t total_report_calls_{0}; // 1.0 // Total number of remote report calls. - uint64_t total_remote_report_calls; + uint64_t total_remote_report_calls_{0}; // 1.0 + // Remote report calls that succeed + uint64_t total_remote_report_successes_{0}; // 1.1 + // Remote report calls that fail due to timeout waiting for the response + uint64_t total_remote_report_timeouts_{0}; // 1.1 + // Remote report calls that fail sending the request (socket connect or write) + uint64_t total_remote_report_send_errors_{0}; // 1.1 + // Remote report calls that fail due to some other error + uint64_t total_remote_report_other_errors_{0}; // 1.1 }; class MixerClient { @@ -84,13 +141,13 @@ class MixerClient { // The response data from mixer will be consumed by mixer client. // A check call. - virtual CancelFunc Check( - const ::istio::mixer::v1::Attributes& attributes, - const std::vector<::istio::quota_config::Requirement>& quotas, - TransportCheckFunc transport, CheckDoneFunc on_done) = 0; + virtual void Check(istio::mixerclient::CheckContextSharedPtr& context, + const TransportCheckFunc& transport, + const CheckDoneFunc& on_done) = 0; // A report call. - virtual void Report(const ::istio::mixer::v1::Attributes& attributes) = 0; + virtual void Report( + const istio::mixerclient::SharedAttributesSharedPtr& attributes) = 0; // Get statistics. 
virtual void GetStatistics(Statistics* stat) const = 0; diff --git a/include/istio/mixerclient/options.h b/include/istio/mixerclient/options.h index f24a0a17f23..3d988cf7221 100644 --- a/include/istio/mixerclient/options.h +++ b/include/istio/mixerclient/options.h @@ -39,7 +39,17 @@ struct CheckOptions { const int num_entries; // If true, Check is passed for any network failures. - bool network_fail_open = true; + bool network_fail_open{true}; + + // Number of retries on transport error + uint32_t retries{0}; + + // Base milliseconds to sleep between retries. Will be adjusted by + // exponential backoff and jitter. + uint32_t base_retry_ms{80}; + + // Max milliseconds to sleep between retries. + uint32_t max_retry_ms{1000}; }; // Options controlling report batch. diff --git a/include/istio/utils/attribute_names.h b/include/istio/utils/attribute_names.h index c355e8d843e..5c6f4b249c9 100644 --- a/include/istio/utils/attribute_names.h +++ b/include/istio/utils/attribute_names.h @@ -31,6 +31,11 @@ struct AttributeName { static const char kSourceUID[]; static const char kDestinationPrincipal[]; + static const char kDestinationServiceName[]; + static const char kDestinationServiceUID[]; + static const char kDestinationServiceHost[]; + static const char kDestinationServiceNamespace[]; + static const char kRequestHeaders[]; static const char kRequestHost[]; static const char kRequestMethod[]; diff --git a/include/istio/utils/attributes_builder.h b/include/istio/utils/attributes_builder.h index c48d872775c..4d1f0996284 100644 --- a/include/istio/utils/attributes_builder.h +++ b/include/istio/utils/attributes_builder.h @@ -26,6 +26,8 @@ namespace istio { namespace utils { +const char kMixerMetadataKey[] = "istio.mixer"; + // Builder class to add attribute to protobuf Attributes. 
// Its usage: // builder(attribute).Add("key", value) @@ -142,7 +144,9 @@ class AttributesBuilder { } for (const auto &filter : filter_state) { - AddProtoStructStringMap(filter.first, filter.second); + if (FiltersToIgnore().find(filter.first) == FiltersToIgnore().end()) { + AddProtoStructStringMap(filter.first, filter.second); + } } } @@ -152,6 +156,14 @@ class AttributesBuilder { } private: + const std::unordered_set &FiltersToIgnore() { + static const auto *filters = + new std::unordered_set{kMixerMetadataKey}; + return *filters; + } + + // TODO(jblatt) audit all uses of raw pointers and replace as many as possible + // with unique/shared pointers. ::istio::mixer::v1::Attributes *attributes_; }; diff --git a/include/istio/utils/simple_lru_cache_inl.h b/include/istio/utils/simple_lru_cache_inl.h index 09eea44fac2..3d6c8b6156d 100644 --- a/include/istio/utils/simple_lru_cache_inl.h +++ b/include/istio/utils/simple_lru_cache_inl.h @@ -497,7 +497,7 @@ class SimpleLRUCacheBase { // so we implement it in the derived SimpleLRUCache. 
virtual void RemoveElement(const Key& k, Value* value) = 0; - virtual void DebugIterator(const Key& k, const Value* value, int pin_count, + virtual void DebugIterator(const Key&, const Value* value, int pin_count, int64_t last_timestamp, bool is_deferred, std::string* output) const { std::stringstream ss; @@ -1054,7 +1054,7 @@ class SimpleLRUCache EQ>(total_units) {} protected: - virtual void RemoveElement(const Key& k, Value* value) { delete value; } + virtual void RemoveElement(const Key&, Value* value) { delete value; } private: GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(SimpleLRUCache); @@ -1077,7 +1077,7 @@ class SimpleLRUCacheWithDeleter : Base(total_units), deleter_(deleter) {} protected: - virtual void RemoveElement(const Key& k, Value* value) { deleter_(value); } + virtual void RemoveElement(const Key&, Value* value) { deleter_(value); } private: Deleter deleter_; diff --git a/istio.deps b/istio.deps index b3ed648b633..627617d754e 100644 --- a/istio.deps +++ b/istio.deps @@ -4,7 +4,7 @@ "name": "ISTIO_API", "repoName": "api", "file": "repositories.bzl", - "lastStableSHA": "825044c7e15f6723d558b7b878855670663c2e1e" + "lastStableSHA": "8463cba039d858e8a849847b872ecea50b0994df" }, { "_comment": "", diff --git a/repositories.bzl b/repositories.bzl index 37fb808b3bd..5a219a8f213 100644 --- a/repositories.bzl +++ b/repositories.bzl @@ -99,8 +99,14 @@ cc_library( actual = "@googletest_git//:googletest_prod", ) -ISTIO_API = "056eb85d96f09441775d79283c149d93fcbd0982" -ISTIO_API_SHA256 = "df491c399f0a06bb2b85f43f5328c880c8e5cb5b3ce972efbd1ce137f83ebc52" +# +# To update these... 
+# 1) find the ISTIO_API SHA you want in git +# 2) wget https://github.com/istio/api/archive/ISTIO_API_SHA.tar.gz +# 3) sha256sum ISTIO_API_SHA.tar.gz +# +ISTIO_API = "8463cba039d858e8a849847b872ecea50b0994df" +ISTIO_API_SHA256 = "ae0fecec9bd316ec811833fff72ba191cf8c97348b33995585a1baa79fb26bf9" def mixerapi_repositories(bind = True): BUILD = """ diff --git a/src/envoy/http/authn/http_filter_factory.cc b/src/envoy/http/authn/http_filter_factory.cc index 221cc78a6e5..c61431a4c7c 100644 --- a/src/envoy/http/authn/http_filter_factory.cc +++ b/src/envoy/http/authn/http_filter_factory.cc @@ -27,6 +27,8 @@ namespace Envoy { namespace Server { namespace Configuration { +namespace iaapi = istio::authentication::v1alpha1; + class AuthnFilterConfig : public NamedHttpFilterConfigFactory, public Logger::Loggable { public: @@ -73,6 +75,9 @@ class AuthnFilterConfig : public NamedHttpFilterConfigFactory, // TODO(incfly): add a test to simulate different config can be handled // correctly similar to multiplexing on different port. auto filter_config = std::make_shared(config_pb); + // Print a log to remind user to upgrade to the mTLS setting. This will only + // be called when a new config is received by Envoy. + warnPermissiveMode(*filter_config); return [filter_config](Http::FilterChainFactoryCallbacks& callbacks) -> void { callbacks.addStreamDecoderFilter( @@ -80,6 +85,29 @@ class AuthnFilterConfig : public NamedHttpFilterConfigFactory, *filter_config)); }; } + + void warnPermissiveMode(const FilterConfig& filter_config) { + for (const auto& method : filter_config.policy().peers()) { + switch (method.params_case()) { + case iaapi::PeerAuthenticationMethod::ParamsCase::kMtls: + if (method.mtls().mode() == iaapi::MutualTls_Mode_PERMISSIVE) { + ENVOY_LOG( + warn, + "mTLS PERMISSIVE mode is used, connection can be either " + "plaintext or TLS, and client cert can be omitted. 
" + "Please consider to upgrade to mTLS STRICT mode for more " + "secure " + "configuration that only allows TLS connection with client " + "cert. " + "See https://istio.io/docs/tasks/security/mtls-migration/"); + return; + } + break; + default: + break; + } + } + } }; /** diff --git a/src/envoy/http/mixer/control_factory.h b/src/envoy/http/mixer/control_factory.h index 2dc21057b98..fdc8a7c59d5 100644 --- a/src/envoy/http/mixer/control_factory.h +++ b/src/envoy/http/mixer/control_factory.h @@ -19,6 +19,7 @@ #include "envoy/local_info/local_info.h" #include "src/envoy/http/mixer/control.h" #include "src/envoy/utils/stats.h" +#include "src/istio/utils/logger.h" namespace Envoy { namespace Http { @@ -51,6 +52,10 @@ class ControlFactory : public Logger::Loggable { return std::make_shared(control_data, cm, dispatcher, random, scope, local_info); }); + + // All MIXER_DEBUG(), MIXER_WARN(), etc log messages will be passed to + // ENVOY_LOG(). + istio::utils::setLogger(std::make_unique()); } Control& control() { return tls_->getTyped(); } @@ -62,6 +67,46 @@ class ControlFactory : public Logger::Loggable { return {ALL_MIXER_FILTER_STATS(POOL_COUNTER_PREFIX(scope, name))}; } + class LoggerAdaptor : public istio::utils::Logger, + Envoy::Logger::Loggable { + virtual bool isLoggable(istio::utils::Logger::Level level) override { + switch (level) { + case istio::utils::Logger::Level::DEBUG_: + return ENVOY_LOG_CHECK_LEVEL(debug); + case istio::utils::Logger::Level::TRACE_: + return ENVOY_LOG_CHECK_LEVEL(trace); + case istio::utils::Logger::Level::INFO_: + return ENVOY_LOG_CHECK_LEVEL(info); + case istio::utils::Logger::Level::WARN_: + return ENVOY_LOG_CHECK_LEVEL(warn); + case istio::utils::Logger::Level::ERROR_: + return ENVOY_LOG_CHECK_LEVEL(error); + default: + NOT_REACHED_GCOVR_EXCL_LINE; + } + } + + virtual void writeBuffer(istio::utils::Logger::Level level, + const char* buffer) override { + switch (level) { + case istio::utils::Logger::Level::DEBUG_: + 
ENVOY_LOGGER().debug(buffer); + break; + case istio::utils::Logger::Level::TRACE_: + ENVOY_LOGGER().trace(buffer); + break; + case istio::utils::Logger::Level::INFO_: + ENVOY_LOGGER().info(buffer); + break; + case istio::utils::Logger::Level::WARN_: + ENVOY_LOGGER().warn(buffer); + break; + case istio::utils::Logger::Level::ERROR_: + ENVOY_LOGGER().error(buffer); + } + } + }; + // The control data object ControlDataSharedPtr control_data_; // Thread local slot. diff --git a/src/envoy/http/mixer/filter.cc b/src/envoy/http/mixer/filter.cc index 3de446898ca..8c310fa27df 100644 --- a/src/envoy/http/mixer/filter.cc +++ b/src/envoy/http/mixer/filter.cc @@ -24,6 +24,7 @@ #include "src/envoy/utils/header_update.h" using ::google::protobuf::util::Status; +using ::istio::mixer::v1::RouteDirective; using ::istio::mixerclient::CheckResponseInfo; namespace Envoy { @@ -75,7 +76,7 @@ FilterHeadersStatus Filter::decodeHeaders(HeaderMap& headers, bool) { decoder_callbacks_->connection()); Utils::HeaderUpdate header_update(&headers); headers_ = &headers; - cancel_check_ = handler_->Check( + handler_->Check( &check_data, &header_update, control_.GetCheckTransport(decoder_callbacks_->activeSpan()), [this](const CheckResponseInfo& info) { completeCheck(info); }); @@ -147,7 +148,8 @@ void Filter::setDecoderFilterCallbacks( } void Filter::completeCheck(const CheckResponseInfo& info) { - auto status = info.response_status; + const Status& status = info.status(); + ENVOY_LOG(debug, "Called Mixer::Filter : check complete {}", status.ToString()); // This stream has been reset, abort the callback. 
@@ -155,7 +157,7 @@ void Filter::completeCheck(const CheckResponseInfo& info) { return; } - route_directive_ = info.route_directive; + route_directive_ = info.routeDirective(); Utils::CheckResponseInfoToStreamInfo(info, decoder_callbacks_->streamInfo()); @@ -201,14 +203,12 @@ void Filter::completeCheck(const CheckResponseInfo& info) { void Filter::onDestroy() { ENVOY_LOG(debug, "Called Mixer::Filter : {} state: {}", __func__, state_); - if (state_ != Calling) { - cancel_check_ = nullptr; + if (state_ != Calling && handler_) { + handler_->ResetCancel(); } state_ = Responded; - if (cancel_check_) { - ENVOY_LOG(debug, "Cancelling check call"); - cancel_check_(); - cancel_check_ = nullptr; + if (handler_) { + handler_->CancelCheck(); } } diff --git a/src/envoy/http/mixer/filter.h b/src/envoy/http/mixer/filter.h index 326c60acac5..e5e12503561 100644 --- a/src/envoy/http/mixer/filter.h +++ b/src/envoy/http/mixer/filter.h @@ -89,8 +89,6 @@ class Filter : public StreamFilter, Control& control_; // The request handler. std::unique_ptr<::istio::control::http::RequestHandler> handler_; - // The pending callback object. 
- istio::mixerclient::CancelFunc cancel_check_; enum State { NotStarted, Calling, Complete, Responded }; // The state diff --git a/src/envoy/tcp/mixer/filter.cc b/src/envoy/tcp/mixer/filter.cc index 9cb94a25efc..822d130fb9f 100644 --- a/src/envoy/tcp/mixer/filter.cc +++ b/src/envoy/tcp/mixer/filter.cc @@ -43,14 +43,12 @@ void Filter::initializeReadFilterCallbacks( } void Filter::cancelCheck() { - if (state_ != State::Calling) { - cancel_check_ = nullptr; + if (state_ != State::Calling && handler_) { + handler_->ResetCancel(); } state_ = State::Closed; - if (cancel_check_) { - ENVOY_LOG(debug, "Cancelling check call"); - cancel_check_(); - cancel_check_ = nullptr; + if (handler_) { + handler_->CancelCheck(); } } @@ -61,7 +59,7 @@ void Filter::callCheck() { state_ = State::Calling; filter_callbacks_->connection().readDisable(true); calling_check_ = true; - cancel_check_ = handler_->Check( + handler_->Check( this, [this](const CheckResponseInfo &info) { completeCheck(info); }); calling_check_ = false; } @@ -138,9 +136,9 @@ Network::FilterStatus Filter::onNewConnection() { } void Filter::completeCheck(const CheckResponseInfo &info) { - const auto &status = info.response_status; + const auto &status = info.status(); ENVOY_LOG(debug, "Called tcp filter completeCheck: {}", status.ToString()); - cancel_check_ = nullptr; + handler_->ResetCancel(); if (state_ == State::Closed) { return; } diff --git a/src/envoy/tcp/mixer/filter.h b/src/envoy/tcp/mixer/filter.h index b9bfb97369c..cf59f1cdb88 100644 --- a/src/envoy/tcp/mixer/filter.h +++ b/src/envoy/tcp/mixer/filter.h @@ -86,8 +86,6 @@ class Filter : public Network::Filter, // Cancel the pending Check call. void cancelCheck(); - // the cancel check - istio::mixerclient::CancelFunc cancel_check_; // the control object. 
Control &control_; // pre-request handler diff --git a/src/envoy/utils/stats.cc b/src/envoy/utils/stats.cc index cb1211c5c34..3e316a06b00 100644 --- a/src/envoy/utils/stats.cc +++ b/src/envoy/utils/stats.cc @@ -53,47 +53,44 @@ void MixerStatsObject::OnTimer() { timer_->enableTimer(std::chrono::milliseconds(stats_update_interval_)); } +#define CHECK_AND_UPDATE_STATS(NAME) \ + if (new_stats.NAME > old_stats_.NAME) { \ + stats_.NAME.add(new_stats.NAME - old_stats_.NAME); \ + } + void MixerStatsObject::CheckAndUpdateStats( const ::istio::mixerclient::Statistics& new_stats) { - if (new_stats.total_check_calls > old_stats_.total_check_calls) { - stats_.total_check_calls_.add(new_stats.total_check_calls - - old_stats_.total_check_calls); - } - if (new_stats.total_remote_check_calls > - old_stats_.total_remote_check_calls) { - stats_.total_remote_check_calls_.add(new_stats.total_remote_check_calls - - old_stats_.total_remote_check_calls); - } - if (new_stats.total_blocking_remote_check_calls > - old_stats_.total_blocking_remote_check_calls) { - stats_.total_blocking_remote_check_calls_.add( - new_stats.total_blocking_remote_check_calls - - old_stats_.total_blocking_remote_check_calls); - } - if (new_stats.total_quota_calls > old_stats_.total_quota_calls) { - stats_.total_quota_calls_.add(new_stats.total_quota_calls - - old_stats_.total_quota_calls); - } - if (new_stats.total_remote_quota_calls > - old_stats_.total_remote_quota_calls) { - stats_.total_remote_quota_calls_.add(new_stats.total_remote_quota_calls - - old_stats_.total_remote_quota_calls); - } - if (new_stats.total_blocking_remote_quota_calls > - old_stats_.total_blocking_remote_quota_calls) { - stats_.total_blocking_remote_quota_calls_.add( - new_stats.total_blocking_remote_quota_calls - - old_stats_.total_blocking_remote_quota_calls); - } - if (new_stats.total_report_calls > old_stats_.total_report_calls) { - stats_.total_report_calls_.add(new_stats.total_report_calls - - old_stats_.total_report_calls); - } - 
if (new_stats.total_remote_report_calls > - old_stats_.total_remote_report_calls) { - stats_.total_remote_report_calls_.add(new_stats.total_remote_report_calls - - old_stats_.total_remote_report_calls); - } + CHECK_AND_UPDATE_STATS(total_check_calls_); + CHECK_AND_UPDATE_STATS(total_check_cache_hits_); + CHECK_AND_UPDATE_STATS(total_check_cache_misses_); + CHECK_AND_UPDATE_STATS(total_check_cache_hit_accepts_); + CHECK_AND_UPDATE_STATS(total_check_cache_hit_denies_); + CHECK_AND_UPDATE_STATS(total_remote_check_calls_); + CHECK_AND_UPDATE_STATS(total_remote_check_accepts_); + CHECK_AND_UPDATE_STATS(total_remote_check_denies_); + CHECK_AND_UPDATE_STATS(total_quota_calls_); + CHECK_AND_UPDATE_STATS(total_quota_cache_hits_); + CHECK_AND_UPDATE_STATS(total_quota_cache_misses_); + CHECK_AND_UPDATE_STATS(total_quota_cache_hit_accepts_); + CHECK_AND_UPDATE_STATS(total_quota_cache_hit_denies_); + CHECK_AND_UPDATE_STATS(total_remote_quota_calls_); + CHECK_AND_UPDATE_STATS(total_remote_quota_accepts_); + CHECK_AND_UPDATE_STATS(total_remote_quota_denies_); + CHECK_AND_UPDATE_STATS(total_remote_quota_prefetch_calls_); + CHECK_AND_UPDATE_STATS(total_remote_calls_); + CHECK_AND_UPDATE_STATS(total_remote_call_successes_); + CHECK_AND_UPDATE_STATS(total_remote_call_timeouts_); + CHECK_AND_UPDATE_STATS(total_remote_call_send_errors_); + CHECK_AND_UPDATE_STATS(total_remote_call_other_errors_); + CHECK_AND_UPDATE_STATS(total_remote_call_retries_); + CHECK_AND_UPDATE_STATS(total_remote_call_cancellations_); + + CHECK_AND_UPDATE_STATS(total_report_calls_); + CHECK_AND_UPDATE_STATS(total_remote_report_calls_); + CHECK_AND_UPDATE_STATS(total_remote_report_successes_); + CHECK_AND_UPDATE_STATS(total_remote_report_timeouts_); + CHECK_AND_UPDATE_STATS(total_remote_report_send_errors_); + CHECK_AND_UPDATE_STATS(total_remote_report_other_errors_); // Copy new_stats to old_stats_ for next stats update. 
old_stats_ = new_stats; diff --git a/src/envoy/utils/stats.h b/src/envoy/utils/stats.h index 7b7e63ff712..e1f93510dea 100644 --- a/src/envoy/utils/stats.h +++ b/src/envoy/utils/stats.h @@ -27,15 +27,37 @@ namespace Utils { * All mixer filter stats. @see stats_macros.h */ // clang-format off -#define ALL_MIXER_FILTER_STATS(COUNTER) \ - COUNTER(total_check_calls) \ - COUNTER(total_remote_check_calls) \ - COUNTER(total_blocking_remote_check_calls) \ - COUNTER(total_quota_calls) \ - COUNTER(total_remote_quota_calls) \ - COUNTER(total_blocking_remote_quota_calls) \ - COUNTER(total_report_calls) \ - COUNTER(total_remote_report_calls) +#define ALL_MIXER_FILTER_STATS(COUNTER) \ + COUNTER(total_check_calls) \ + COUNTER(total_check_cache_hits) \ + COUNTER(total_check_cache_misses) \ + COUNTER(total_check_cache_hit_accepts) \ + COUNTER(total_check_cache_hit_denies) \ + COUNTER(total_remote_check_calls) \ + COUNTER(total_remote_check_accepts) \ + COUNTER(total_remote_check_denies) \ + COUNTER(total_quota_calls) \ + COUNTER(total_quota_cache_hits) \ + COUNTER(total_quota_cache_misses) \ + COUNTER(total_quota_cache_hit_accepts) \ + COUNTER(total_quota_cache_hit_denies) \ + COUNTER(total_remote_quota_calls) \ + COUNTER(total_remote_quota_accepts) \ + COUNTER(total_remote_quota_denies) \ + COUNTER(total_remote_quota_prefetch_calls) \ + COUNTER(total_remote_calls) \ + COUNTER(total_remote_call_successes) \ + COUNTER(total_remote_call_timeouts) \ + COUNTER(total_remote_call_send_errors) \ + COUNTER(total_remote_call_other_errors) \ + COUNTER(total_remote_call_retries) \ + COUNTER(total_remote_call_cancellations) \ + COUNTER(total_report_calls) \ + COUNTER(total_remote_report_calls) \ + COUNTER(total_remote_report_successes) \ + COUNTER(total_remote_report_timeouts) \ + COUNTER(total_remote_report_send_errors) \ + COUNTER(total_remote_report_other_errors) // clang-format on /** diff --git a/src/envoy/utils/utils.cc b/src/envoy/utils/utils.cc index 0b7d95bcb9c..d598d12bc0e 100644 --- 
a/src/envoy/utils/utils.cc +++ b/src/envoy/utils/utils.cc @@ -14,6 +14,7 @@ */ #include "src/envoy/utils/utils.h" +#include "include/istio/utils/attributes_builder.h" #include "mixer/v1/attributes.pb.h" using ::google::protobuf::Message; @@ -142,16 +143,13 @@ Status ParseJsonMessage(const std::string& json, Message* output) { void CheckResponseInfoToStreamInfo( const istio::mixerclient::CheckResponseInfo& check_response, StreamInfo::StreamInfo& stream_info) { - static std::string metadata_key = "istio.mixer"; - - if (!check_response.response_status.ok()) { + if (!check_response.status().ok()) { stream_info.setResponseFlag( StreamInfo::ResponseFlag::UnauthorizedExternalService); ProtobufWkt::Struct metadata; auto& fields = *metadata.mutable_fields(); - fields["status"].set_string_value( - check_response.response_status.ToString()); - stream_info.setDynamicMetadata(metadata_key, metadata); + fields["status"].set_string_value(check_response.status().ToString()); + stream_info.setDynamicMetadata(istio::utils::kMixerMetadataKey, metadata); } } diff --git a/src/envoy/utils/utils_test.cc b/src/envoy/utils/utils_test.cc index acd2bb4f9de..53f262dbba4 100644 --- a/src/envoy/utils/utils_test.cc +++ b/src/envoy/utils/utils_test.cc @@ -15,6 +15,7 @@ #include "src/envoy/utils/utils.h" #include "mixer/v1/config/client/client_config.pb.h" +#include "src/istio/mixerclient/check_context.h" #include "test/mocks/stream_info/mocks.h" #include "test/test_common/utility.h" @@ -48,8 +49,9 @@ TEST(UtilsTest, ParseMessageWithUnknownField) { } TEST(UtilsTest, CheckResponseInfoToStreamInfo) { - ::istio::mixerclient::CheckResponseInfo - check_response; // by default status is unknown + auto attributes = std::make_shared<::istio::mixerclient::SharedAttributes>(); + ::istio::mixerclient::CheckContext check_response( + 0U, false /* fail_open */, attributes); // by default status is unknown Envoy::StreamInfo::MockStreamInfo mock_stream_info; EXPECT_CALL( diff --git a/src/istio/control/BUILD 
b/src/istio/control/BUILD index 637f9817586..f58bfe9ba94 100644 --- a/src/istio/control/BUILD +++ b/src/istio/control/BUILD @@ -21,7 +21,6 @@ cc_library( ], hdrs = [ "client_context_base.h", - "request_context.h", ], visibility = [":__subpackages__"], deps = [ diff --git a/src/istio/control/client_context_base.cc b/src/istio/control/client_context_base.cc index c2ff0ef551e..9ca884c2b6d 100644 --- a/src/istio/control/client_context_base.cc +++ b/src/istio/control/client_context_base.cc @@ -17,6 +17,7 @@ #include "include/istio/mixerclient/check_response.h" #include "include/istio/utils/attribute_names.h" #include "include/istio/utils/attributes_builder.h" +#include "src/istio/utils/logger.h" using ::google::protobuf::util::Status; using ::istio::mixer::v1::config::client::NetworkFailPolicy; @@ -30,6 +31,7 @@ using ::istio::mixerclient::MixerClientOptions; using ::istio::mixerclient::QuotaOptions; using ::istio::mixerclient::ReportOptions; using ::istio::mixerclient::Statistics; +using ::istio::mixerclient::TimerCreateFunc; using ::istio::mixerclient::TransportCheckFunc; using ::istio::utils::CreateLocalAttributes; using ::istio::utils::LocalNode; @@ -38,6 +40,16 @@ namespace istio { namespace control { namespace { +static constexpr uint32_t MaxDurationSec = 24 * 60 * 60; + +static uint32_t DurationToMsec(const ::google::protobuf::Duration& duration) { + uint32_t msec = + 1000 * (duration.seconds() > MaxDurationSec ? 
MaxDurationSec + : duration.seconds()); + msec += duration.nanos() / 1000 / 1000; + return msec; +} + CheckOptions GetJustCheckOptions(const TransportConfig& config) { if (config.disable_check_cache()) { return CheckOptions(0); @@ -47,9 +59,25 @@ CheckOptions GetJustCheckOptions(const TransportConfig& config) { CheckOptions GetCheckOptions(const TransportConfig& config) { auto options = GetJustCheckOptions(config); - if (config.has_network_fail_policy() && - config.network_fail_policy().policy() == NetworkFailPolicy::FAIL_CLOSE) { - options.network_fail_open = false; + if (config.has_network_fail_policy()) { + if (config.network_fail_policy().policy() == + NetworkFailPolicy::FAIL_CLOSE) { + options.network_fail_open = false; + } + + if (0 <= config.network_fail_policy().max_retry()) { + options.retries = config.network_fail_policy().max_retry(); + } + + if (config.network_fail_policy().has_base_retry_wait()) { + options.base_retry_ms = + DurationToMsec(config.network_fail_policy().base_retry_wait()); + } + + if (config.network_fail_policy().has_max_retry_wait()) { + options.max_retry_ms = + DurationToMsec(config.network_fail_policy().max_retry_wait()); + } } return options; } @@ -79,37 +107,23 @@ ClientContextBase::ClientContextBase(const TransportConfig& config, options.env = env; mixer_client_ = ::istio::mixerclient::CreateMixerClient(options); CreateLocalAttributes(local_node, &local_attributes_); + network_fail_open_ = options.check_options.network_fail_open; + retries_ = options.check_options.retries; } -CancelFunc ClientContextBase::SendCheck(TransportCheckFunc transport, - CheckDoneFunc on_done, - RequestContext* request) { - // Intercept the callback to save check status in request_context - auto local_on_done = [request, - on_done](const CheckResponseInfo& check_response_info) { - // save the check status code - request->check_status = check_response_info.response_status; - - utils::AttributesBuilder builder(request->attributes); - 
builder.AddBool(utils::AttributeName::kCheckCacheHit, - check_response_info.is_check_cache_hit); - builder.AddBool(utils::AttributeName::kQuotaCacheHit, - check_response_info.is_quota_cache_hit); - on_done(check_response_info); - }; - - // TODO: add debug message - // GOOGLE_LOG(INFO) << "Check attributes: " << - // request->attributes->DebugString(); - return mixer_client_->Check(*request->attributes, request->quotas, transport, - local_on_done); +void ClientContextBase::SendCheck( + const TransportCheckFunc& transport, const CheckDoneFunc& on_done, + ::istio::mixerclient::CheckContextSharedPtr& context) { + MIXER_DEBUG("Check attributes: %s", + context->attributes()->DebugString().c_str()); + return mixer_client_->Check(context, transport, on_done); } -void ClientContextBase::SendReport(const RequestContext& request) { - // TODO: add debug message - // GOOGLE_LOG(INFO) << "Report attributes: " << - // request.attributes->DebugString(); - mixer_client_->Report(*request.attributes); +void ClientContextBase::SendReport( + const istio::mixerclient::SharedAttributesSharedPtr& attributes) { + MIXER_DEBUG("Report attributes: %s", + attributes->attributes()->DebugString().c_str()); + mixer_client_->Report(attributes); } void ClientContextBase::GetStatistics(Statistics* stat) const { diff --git a/src/istio/control/client_context_base.h b/src/istio/control/client_context_base.h index 560c090083c..e3640f4d203 100644 --- a/src/istio/control/client_context_base.h +++ b/src/istio/control/client_context_base.h @@ -17,10 +17,12 @@ #define ISTIO_CONTROL_CLIENT_CONTEXT_BASE_H #include "include/istio/mixerclient/client.h" +#include "include/istio/mixerclient/timer.h" #include "include/istio/utils/attribute_names.h" #include "include/istio/utils/local_attributes.h" #include "mixer/v1/config/client/client_config.pb.h" -#include "request_context.h" +#include "src/istio/mixerclient/check_context.h" +#include "src/istio/mixerclient/shared_attributes.h" namespace istio { namespace 
control { @@ -40,17 +42,20 @@ class ClientContextBase { bool outbound, ::istio::utils::LocalAttributes& local_attributes) : mixer_client_(std::move(mixer_client)), outbound_(outbound), - local_attributes_(local_attributes) {} + local_attributes_(local_attributes), + network_fail_open_(false), + retries_(0) {} // virtual destrutor virtual ~ClientContextBase() {} // Use mixer client object to make a Check call. - ::istio::mixerclient::CancelFunc SendCheck( - ::istio::mixerclient::TransportCheckFunc transport, - ::istio::mixerclient::CheckDoneFunc on_done, RequestContext* request); + void SendCheck(const ::istio::mixerclient::TransportCheckFunc& transport, + const ::istio::mixerclient::CheckDoneFunc& on_done, + ::istio::mixerclient::CheckContextSharedPtr& check_context); // Use mixer client object to make a Report call. - void SendReport(const RequestContext& request); + void SendReport( + const istio::mixerclient::SharedAttributesSharedPtr& attributes); // Get statistics. void GetStatistics(::istio::mixerclient::Statistics* stat) const; @@ -60,6 +65,10 @@ class ClientContextBase { void AddLocalNodeForwardAttribues( ::istio::mixer::v1::Attributes* request) const; + bool NetworkFailOpen() const { return network_fail_open_; } + + uint32_t Retries() const { return retries_; } + private: // The mixer client object with check cache and report batch features. std::unique_ptr<::istio::mixerclient::MixerClient> mixer_client_; @@ -69,6 +78,9 @@ class ClientContextBase { // local attributes - owned by the client context. 
::istio::utils::LocalAttributes local_attributes_; + + bool network_fail_open_; + uint32_t retries_; }; } // namespace control diff --git a/src/istio/control/http/attributes_builder.cc b/src/istio/control/http/attributes_builder.cc index 5e4e69fdd68..32ffa5c8423 100644 --- a/src/istio/control/http/attributes_builder.cc +++ b/src/istio/control/http/attributes_builder.cc @@ -17,6 +17,7 @@ #include +#include "google/protobuf/stubs/status.h" #include "include/istio/utils/attribute_names.h" #include "include/istio/utils/attributes_builder.h" #include "include/istio/utils/status.h" @@ -35,7 +36,7 @@ const std::set kGrpcContentTypes{ } // namespace void AttributesBuilder::ExtractRequestHeaderAttributes(CheckData *check_data) { - utils::AttributesBuilder builder(request_->attributes); + utils::AttributesBuilder builder(attributes_); std::map headers = check_data->GetRequestHeaders(); builder.AddStringMap(utils::AttributeName::kRequestHeaders, headers); @@ -79,7 +80,7 @@ void AttributesBuilder::ExtractRequestHeaderAttributes(CheckData *check_data) { } void AttributesBuilder::ExtractAuthAttributes(CheckData *check_data) { - utils::AttributesBuilder builder(request_->attributes); + utils::AttributesBuilder builder(attributes_); std::string destination_principal; if (check_data->GetPrincipal(false, &destination_principal)) { @@ -130,18 +131,38 @@ void AttributesBuilder::ExtractForwardedAttributes(CheckData *check_data) { if (!check_data->ExtractIstioAttributes(&forwarded_data)) { return; } + Attributes v2_format; - if (v2_format.ParseFromString(forwarded_data)) { - request_->attributes->MergeFrom(v2_format); + if (!v2_format.ParseFromString(forwarded_data)) { return; } + + static const std::set kForwardWhitelist = { + utils::AttributeName::kSourceUID, + utils::AttributeName::kSourceNamespace, + utils::AttributeName::kDestinationServiceName, + utils::AttributeName::kDestinationServiceUID, + utils::AttributeName::kDestinationServiceHost, + 
utils::AttributeName::kDestinationServiceNamespace, + }; + + auto fwd = v2_format.attributes(); + utils::AttributesBuilder builder(attributes_); + for (const auto &attribute : kForwardWhitelist) { + const auto &iter = fwd.find(attribute); + if (iter != fwd.end() && !iter->second.string_value().empty()) { + builder.AddString(attribute, iter->second.string_value()); + } + } + + return; } void AttributesBuilder::ExtractCheckAttributes(CheckData *check_data) { ExtractRequestHeaderAttributes(check_data); ExtractAuthAttributes(check_data); - utils::AttributesBuilder builder(request_->attributes); + utils::AttributesBuilder builder(attributes_); // connection remote IP is always reported as origin IP std::string source_ip; @@ -180,8 +201,9 @@ void AttributesBuilder::ForwardAttributes(const Attributes &forward_attributes, header_update->AddIstioAttributes(str); } -void AttributesBuilder::ExtractReportAttributes(ReportData *report_data) { - utils::AttributesBuilder builder(request_->attributes); +void AttributesBuilder::ExtractReportAttributes( + const ::google::protobuf::util::Status &status, ReportData *report_data) { + utils::AttributesBuilder builder(attributes_); std::string dest_ip; int dest_port; @@ -218,14 +240,13 @@ void AttributesBuilder::ExtractReportAttributes(ReportData *report_data) { builder.AddInt64(utils::AttributeName::kResponseTotalSize, info.response_total_size); builder.AddDuration(utils::AttributeName::kResponseDuration, info.duration); - if (!request_->check_status.ok()) { - builder.AddInt64( - utils::AttributeName::kResponseCode, - utils::StatusHttpCode(request_->check_status.error_code())); + if (status != ::google::protobuf::util::Status::UNKNOWN && !status.ok()) { + builder.AddInt64(utils::AttributeName::kResponseCode, + utils::StatusHttpCode(status.error_code())); builder.AddInt64(utils::AttributeName::kCheckErrorCode, - request_->check_status.error_code()); + status.error_code()); builder.AddString(utils::AttributeName::kCheckErrorMessage, - 
request_->check_status.ToString()); + status.ToString()); } else { builder.AddInt64(utils::AttributeName::kResponseCode, info.response_code); } diff --git a/src/istio/control/http/attributes_builder.h b/src/istio/control/http/attributes_builder.h index a8346f7f5ec..0385f152bd7 100644 --- a/src/istio/control/http/attributes_builder.h +++ b/src/istio/control/http/attributes_builder.h @@ -18,7 +18,7 @@ #include "include/istio/control/http/check_data.h" #include "include/istio/control/http/report_data.h" -#include "src/istio/control/request_context.h" +#include "mixer/v1/attributes.pb.h" namespace istio { namespace control { @@ -27,7 +27,8 @@ namespace http { // The context for each HTTP request. class AttributesBuilder { public: - AttributesBuilder(RequestContext* request) : request_(request) {} + AttributesBuilder(istio::mixer::v1::Attributes* attributes) + : attributes_(attributes) {} // Extract forwarded attributes from HTTP header. void ExtractForwardedAttributes(CheckData* check_data); @@ -39,7 +40,8 @@ class AttributesBuilder { // Extract attributes for Check call. void ExtractCheckAttributes(CheckData* check_data); // Extract attributes for Report call. - void ExtractReportAttributes(ReportData* report_data); + void ExtractReportAttributes(const ::google::protobuf::util::Status& status, + ReportData* report_data); private: // Extract HTTP header attributes @@ -52,8 +54,7 @@ class AttributesBuilder { // not available. void ExtractAuthAttributes(CheckData* check_data); - // The request context object. 
- RequestContext* request_; + istio::mixer::v1::Attributes* attributes_; }; } // namespace http diff --git a/src/istio/control/http/attributes_builder_test.cc b/src/istio/control/http/attributes_builder_test.cc index 31c14b947e9..09c84bc1567 100644 --- a/src/istio/control/http/attributes_builder_test.cc +++ b/src/istio/control/http/attributes_builder_test.cc @@ -16,6 +16,7 @@ #include "src/istio/control/http/attributes_builder.h" #include "gmock/gmock.h" +#include "google/protobuf/stubs/status.h" #include "google/protobuf/text_format.h" #include "google/protobuf/util/message_differencer.h" #include "gtest/gtest.h" @@ -531,21 +532,23 @@ fields { } )"; -void ClearContextTime(const std::string &name, RequestContext *request) { +void ClearContextTime(const std::string &name, + istio::mixer::v1::Attributes *attributes) { // Override timestamp with - - utils::AttributesBuilder builder(request->attributes); + utils::AttributesBuilder builder(attributes); std::chrono::time_point time0; builder.AddTimestamp(name, time0); } -void SetDestinationIp(RequestContext *request, const std::string &ip) { - utils::AttributesBuilder builder(request->attributes); +void SetDestinationIp(istio::mixer::v1::Attributes *attributes, + const std::string &ip) { + utils::AttributesBuilder builder(attributes); builder.AddBytes(utils::AttributeName::kDestinationIp, ip); } TEST(AttributesBuilderTest, TestExtractForwardedAttributes) { Attributes attr; - (*attr.mutable_attributes())["test_key"].set_string_value("test_value"); + (*attr.mutable_attributes())["source.uid"].set_string_value("test_value"); ::testing::StrictMock mock_data; EXPECT_CALL(mock_data, ExtractIstioAttributes(_)) @@ -554,10 +557,10 @@ TEST(AttributesBuilderTest, TestExtractForwardedAttributes) { return true; })); - RequestContext request; - AttributesBuilder builder(&request); + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); builder.ExtractForwardedAttributes(&mock_data); - 
EXPECT_THAT(*request.attributes, EqualsAttribute(attr)); + EXPECT_THAT(attributes, EqualsAttribute(attr)); } TEST(AttributesBuilderTest, TestForwardAttributes) { @@ -638,16 +641,16 @@ TEST(AttributesBuilderTest, TestCheckAttributesWithoutAuthnFilter) { return true; })); - RequestContext request; - AttributesBuilder builder(&request); + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); builder.ExtractCheckAttributes(&mock_data); - ClearContextTime(utils::AttributeName::kRequestTime, &request); + ClearContextTime(utils::AttributeName::kRequestTime, &attributes); Attributes expected_attributes; ASSERT_TRUE(TextFormat::ParseFromString(kCheckAttributesWithoutAuthnFilter, &expected_attributes)); - EXPECT_THAT(*request.attributes, EqualsAttribute(expected_attributes)); + EXPECT_THAT(attributes, EqualsAttribute(expected_attributes)); } TEST(AttributesBuilderTest, TestCheckAttributes) { @@ -712,16 +715,16 @@ TEST(AttributesBuilderTest, TestCheckAttributes) { return true; })); - RequestContext request; - AttributesBuilder builder(&request); + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); builder.ExtractCheckAttributes(&mock_data); - ClearContextTime(utils::AttributeName::kRequestTime, &request); + ClearContextTime(utils::AttributeName::kRequestTime, &attributes); Attributes expected_attributes; ASSERT_TRUE( TextFormat::ParseFromString(kCheckAttributes, &expected_attributes)); - EXPECT_THAT(*request.attributes, EqualsAttribute(expected_attributes)); + EXPECT_THAT(attributes, EqualsAttribute(expected_attributes)); } TEST(AttributesBuilderTest, TestReportAttributes) { @@ -742,6 +745,7 @@ TEST(AttributesBuilderTest, TestReportAttributes) { listval.mutable_list_value()->add_values()->set_string_value("c"); (*struct_obj.mutable_fields())["list"] = listval; filter_metadata["foo.bar.com"] = struct_obj; + filter_metadata["istio.mixer"] = struct_obj; // to be ignored EXPECT_CALL(mock_data, GetDestinationIpPort(_, 
_)) .WillOnce(Invoke([](std::string *ip, int *port) -> bool { @@ -786,11 +790,12 @@ TEST(AttributesBuilderTest, TestReportAttributes) { EXPECT_CALL(mock_data, GetDynamicFilterState()) .WillOnce(ReturnRef(filter_metadata)); - RequestContext request; - AttributesBuilder builder(&request); - builder.ExtractReportAttributes(&mock_data); + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); + builder.ExtractReportAttributes(::google::protobuf::util::Status::OK, + &mock_data); - ClearContextTime(utils::AttributeName::kResponseTime, &request); + ClearContextTime(utils::AttributeName::kResponseTime, &attributes); Attributes expected_attributes; ASSERT_TRUE( @@ -804,7 +809,7 @@ TEST(AttributesBuilderTest, TestReportAttributes) { (*expected_attributes .mutable_attributes())[utils::AttributeName::kResponseGrpcMessage] .set_string_value("grpc-message"); - EXPECT_THAT(*request.attributes, EqualsAttribute(expected_attributes)); + EXPECT_THAT(attributes, EqualsAttribute(expected_attributes)); } TEST(AttributesBuilderTest, TestReportAttributesWithDestIP) { @@ -860,17 +865,18 @@ TEST(AttributesBuilderTest, TestReportAttributesWithDestIP) { EXPECT_CALL(mock_data, GetDynamicFilterState()) .WillOnce(ReturnRef(filter_metadata)); - RequestContext request; - SetDestinationIp(&request, "1.2.3.4"); - AttributesBuilder builder(&request); - builder.ExtractReportAttributes(&mock_data); + istio::mixer::v1::Attributes attributes; + SetDestinationIp(&attributes, "1.2.3.4"); + AttributesBuilder builder(&attributes); + builder.ExtractReportAttributes(::google::protobuf::util::Status::OK, + &mock_data); - ClearContextTime(utils::AttributeName::kResponseTime, &request); + ClearContextTime(utils::AttributeName::kResponseTime, &attributes); Attributes expected_attributes; ASSERT_TRUE( TextFormat::ParseFromString(kReportAttributes, &expected_attributes)); - EXPECT_THAT(*request.attributes, EqualsAttribute(expected_attributes)); + EXPECT_THAT(attributes, 
EqualsAttribute(expected_attributes)); } } // namespace diff --git a/src/istio/control/http/request_handler_impl.cc b/src/istio/control/http/request_handler_impl.cc index 87144b4a8ec..a5111a01e4d 100644 --- a/src/istio/control/http/request_handler_impl.cc +++ b/src/istio/control/http/request_handler_impl.cc @@ -20,6 +20,7 @@ using ::google::protobuf::util::Status; using ::istio::mixerclient::CancelFunc; using ::istio::mixerclient::CheckDoneFunc; using ::istio::mixerclient::CheckResponseInfo; +using ::istio::mixerclient::TimerCreateFunc; using ::istio::mixerclient::TransportCheckFunc; using ::istio::quota_config::Requirement; @@ -29,9 +30,11 @@ namespace http { RequestHandlerImpl::RequestHandlerImpl( std::shared_ptr service_context) - : service_context_(service_context), - check_attributes_added_(false), - forward_attributes_added_(false) {} + : attributes_(new istio::mixerclient::SharedAttributes()), + check_context_(new istio::mixerclient::CheckContext( + service_context->client_context()->Retries(), + service_context->client_context()->NetworkFailOpen(), attributes_)), + service_context_(service_context) {} void RequestHandlerImpl::AddForwardAttributes(CheckData* check_data) { if (forward_attributes_added_) { @@ -39,7 +42,7 @@ void RequestHandlerImpl::AddForwardAttributes(CheckData* check_data) { } forward_attributes_added_ = true; - AttributesBuilder builder(&request_context_); + AttributesBuilder builder(attributes_->attributes()); builder.ExtractForwardedAttributes(check_data); } @@ -51,38 +54,49 @@ void RequestHandlerImpl::AddCheckAttributes(CheckData* check_data) { if (service_context_->enable_mixer_check() || service_context_->enable_mixer_report()) { - service_context_->AddStaticAttributes(&request_context_); + service_context_->AddStaticAttributes(attributes_->attributes()); - AttributesBuilder builder(&request_context_); + AttributesBuilder builder(attributes_->attributes()); builder.ExtractCheckAttributes(check_data); - 
service_context_->AddApiAttributes(check_data, &request_context_); + service_context_->AddApiAttributes(check_data, attributes_->attributes()); } } -CancelFunc RequestHandlerImpl::Check(CheckData* check_data, - HeaderUpdate* header_update, - TransportCheckFunc transport, - CheckDoneFunc on_done) { +void RequestHandlerImpl::Check(CheckData* check_data, + HeaderUpdate* header_update, + const TransportCheckFunc& transport, + const CheckDoneFunc& on_done) { // Forwarded attributes need to be stored regardless Check is needed // or not since the header will be updated or removed. + AddCheckAttributes(check_data); AddForwardAttributes(check_data); header_update->RemoveIstioAttributes(); service_context_->InjectForwardedAttributes(header_update); if (!service_context_->enable_mixer_check()) { - CheckResponseInfo check_response_info; - check_response_info.response_status = Status::OK; - on_done(check_response_info); - return nullptr; + check_context_->setFinalStatus(Status::OK, false); + on_done(*check_context_); + return; } - AddCheckAttributes(check_data); + service_context_->AddQuotas(attributes_->attributes(), + check_context_->quotaRequirements()); + + service_context_->client_context()->SendCheck(transport, on_done, + check_context_); +} - service_context_->AddQuotas(&request_context_); +void RequestHandlerImpl::ResetCancel() { + if (check_context_) { + check_context_->resetCancel(); + } +} - return service_context_->client_context()->SendCheck(transport, on_done, - &request_context_); +void RequestHandlerImpl::CancelCheck() { + if (check_context_) { + check_context_->cancel(); + } } // Make remote report call. 
@@ -95,10 +109,10 @@ void RequestHandlerImpl::Report(CheckData* check_data, AddForwardAttributes(check_data); AddCheckAttributes(check_data); - AttributesBuilder builder(&request_context_); - builder.ExtractReportAttributes(report_data); + AttributesBuilder builder(attributes_->attributes()); + builder.ExtractReportAttributes(check_context_->status(), report_data); - service_context_->client_context()->SendReport(request_context_); + service_context_->client_context()->SendReport(attributes_); } } // namespace http diff --git a/src/istio/control/http/request_handler_impl.h b/src/istio/control/http/request_handler_impl.h index 0e8c2b28072..d454a4f505b 100644 --- a/src/istio/control/http/request_handler_impl.h +++ b/src/istio/control/http/request_handler_impl.h @@ -19,7 +19,7 @@ #include "include/istio/control/http/request_handler.h" #include "src/istio/control/http/client_context.h" #include "src/istio/control/http/service_context.h" -#include "src/istio/control/request_context.h" +#include "src/istio/mixerclient/check_context.h" namespace istio { namespace control { @@ -30,11 +30,16 @@ class RequestHandlerImpl : public RequestHandler { public: RequestHandlerImpl(std::shared_ptr service_context); + virtual ~RequestHandlerImpl() = default; + // Makes a Check call. - ::istio::mixerclient::CancelFunc Check( - CheckData* check_data, HeaderUpdate* header_update, - ::istio::mixerclient::TransportCheckFunc transport, - ::istio::mixerclient::CheckDoneFunc on_done) override; + void Check(CheckData* check_data, HeaderUpdate* header_update, + const ::istio::mixerclient::TransportCheckFunc& transport, + const ::istio::mixerclient::CheckDoneFunc& on_done) override; + + void ResetCancel() override; + + void CancelCheck() override; // Make a Report call. 
void Report(CheckData* check_data, ReportData* report_data) override; @@ -45,16 +50,16 @@ class RequestHandlerImpl : public RequestHandler { // Add check attributes, allow re-entry void AddCheckAttributes(CheckData* check_data); - // The request context object. - RequestContext request_context_; + // memory for telemetry reports and policy checks. Telemetry only needs the + // shared attributes. + istio::mixerclient::SharedAttributesSharedPtr attributes_; + istio::mixerclient::CheckContextSharedPtr check_context_; // The service context. std::shared_ptr service_context_; - // If true, request attributes are added - bool check_attributes_added_; - // If true, forward attributes are added - bool forward_attributes_added_; + bool check_attributes_added_{false}; + bool forward_attributes_added_{false}; }; } // namespace http diff --git a/src/istio/control/http/request_handler_impl_test.cc b/src/istio/control/http/request_handler_impl_test.cc index 4fc87aa12f0..8c4e09cdcb6 100644 --- a/src/istio/control/http/request_handler_impl_test.cc +++ b/src/istio/control/http/request_handler_impl_test.cc @@ -28,6 +28,7 @@ using ::istio::mixer::v1::Attributes; using ::istio::mixer::v1::config::client::HttpClientConfig; using ::istio::mixer::v1::config::client::ServiceConfig; using ::istio::mixerclient::CancelFunc; +using ::istio::mixerclient::CheckContextSharedPtr; using ::istio::mixerclient::CheckDoneFunc; using ::istio::mixerclient::CheckResponseInfo; using ::istio::mixerclient::DoneFunc; @@ -49,7 +50,7 @@ const char kLocalInbound[] = R"( attributes { key: "destination.uid" value { - string_value: "kubernetes://client-84469dc8d7-jbbxt.default" + string_value: "kubernetes://dest-client-84469dc8d7-jbbxt.default" } } )"; @@ -58,7 +59,7 @@ const char kLocalOutbound[] = R"( attributes { key: "source.uid" value { - string_value: "kubernetes://client-84469dc8d7-jbbxt.default" + string_value: "kubernetes://src-client-84469dc8d7-jbbxt.default" } } )"; @@ -128,6 +129,7 @@ 
forward_attributes { class RequestHandlerImplTest : public ::testing::Test { public: + RequestHandlerImplTest(bool outbound = false) : outbound_(outbound) {} void SetUp() { SetUpMockController(kDefaultClientConfig); } void SetUpMockController(const std::string &config_text) { @@ -154,7 +156,7 @@ class RequestHandlerImplTest : public ::testing::Test { client_context_ = std::make_shared( std::unique_ptr(mock_client_), client_config_, 3, la, - false); + outbound_); controller_ = std::unique_ptr(new ControllerImpl(client_context_)); } @@ -173,6 +175,14 @@ class RequestHandlerImplTest : public ::testing::Test { HttpClientConfig client_config_; ::testing::NiceMock *mock_client_; std::unique_ptr controller_; + + private: + bool outbound_; +}; + +class OutboundRequestHandlerImplTest : public RequestHandlerImplTest { + public: + OutboundRequestHandlerImplTest() : RequestHandlerImplTest(true) {} }; TEST_F(RequestHandlerImplTest, TestServiceConfigManage) { @@ -201,7 +211,7 @@ TEST_F(RequestHandlerImplTest, TestHandlerDisabledCheckReport) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(0); // Check should NOT be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)).Times(0); + EXPECT_CALL(*mock_client_, Check(_, _, _)).Times(0); ServiceConfig config; config.set_disable_check_calls(true); @@ -210,21 +220,20 @@ TEST_F(RequestHandlerImplTest, TestHandlerDisabledCheckReport) { ApplyPerRouteConfig(config, &per_route); auto handler = controller_->CreateRequestHandler(per_route); - handler->Check(&mock_data, &mock_header, nullptr, - [](const CheckResponseInfo &info) { - EXPECT_TRUE(info.response_status.ok()); - }); + handler->Check( + &mock_data, &mock_header, nullptr, + [](const CheckResponseInfo &info) { EXPECT_TRUE(info.status().ok()); }); } TEST_F(RequestHandlerImplTest, TestHandlerDisabledCheck) { ::testing::NiceMock mock_data; ::testing::NiceMock mock_header; - // Report is enabled so Check Attributes are not extracted. 
- EXPECT_CALL(mock_data, GetSourceIpPort(_, _)).Times(0); - EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(0); + // Report is enabled so Check Attributes are extracted but not sent. + EXPECT_CALL(mock_data, GetSourceIpPort(_, _)).Times(1); + EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should NOT be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)).Times(0); + EXPECT_CALL(*mock_client_, Check(_, _, _)).Times(0); ServiceConfig config; config.set_disable_check_calls(true); @@ -232,10 +241,9 @@ TEST_F(RequestHandlerImplTest, TestHandlerDisabledCheck) { ApplyPerRouteConfig(config, &per_route); auto handler = controller_->CreateRequestHandler(per_route); - handler->Check(&mock_data, &mock_header, nullptr, - [](const CheckResponseInfo &info) { - EXPECT_TRUE(info.response_status.ok()); - }); + handler->Check( + &mock_data, &mock_header, nullptr, + [](const CheckResponseInfo &info) { EXPECT_TRUE(info.status().ok()); }); } TEST_F(RequestHandlerImplTest, TestPerRouteAttributes) { @@ -245,15 +253,13 @@ TEST_F(RequestHandlerImplTest, TestPerRouteAttributes) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["global-key"].string_value(), "global-value"); EXPECT_EQ(map["per-route-key"].string_value(), "per-route-value"); - return nullptr; })); ServiceConfig config; @@ -273,15 +279,13 @@ TEST_F(RequestHandlerImplTest, TestDefaultRouteAttributes) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should be called. 
- EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["global-key"].string_value(), "global-value"); EXPECT_EQ(map["route0-key"].string_value(), "route0-value"); - return nullptr; })); // Attribute is forwarded: route override @@ -312,15 +316,13 @@ TEST_F(RequestHandlerImplTest, TestRouteAttributes) { SetServiceConfig("route1", route_config); // Check should be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["global-key"].string_value(), "service-value"); EXPECT_EQ(map["route1-key"].string_value(), "route1-value"); - return nullptr; })); // Attribute is forwarded: global @@ -343,17 +345,15 @@ TEST_F(RequestHandlerImplTest, TestPerRouteQuota) { ::testing::NiceMock mock_header; // Check should be called. 
- EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["global-key"].string_value(), "global-value"); - EXPECT_EQ(quotas.size(), 1); - EXPECT_EQ(quotas[0].quota, "route0-quota"); - EXPECT_EQ(quotas[0].charge, 10); - return nullptr; + EXPECT_EQ(context->quotaRequirements().size(), 1); + EXPECT_EQ(context->quotaRequirements()[0].quota, "route0-quota"); + EXPECT_EQ(context->quotaRequirements()[0].charge, 10); })); ServiceConfig config; @@ -385,16 +385,14 @@ TEST_F(RequestHandlerImplTest, TestPerRouteApiSpec) { })); // Check should be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["global-key"].string_value(), "global-value"); EXPECT_EQ(map["api.name"].string_value(), "test-name"); EXPECT_EQ(map["api.operation"].string_value(), "test-method"); - return nullptr; })); ServiceConfig config; @@ -421,7 +419,7 @@ TEST_F(RequestHandlerImplTest, TestHandlerCheck) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should be called. 
- EXPECT_CALL(*mock_client_, Check(_, _, _, _)).Times(1); + EXPECT_CALL(*mock_client_, Check(_, _, _)).Times(1); ServiceConfig config; Controller::PerRouteConfig per_route; @@ -445,15 +443,13 @@ TEST_F(RequestHandlerImplTest, TestDefaultApiKey) { })); // Check should be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map[utils::AttributeName::kRequestApiKey].string_value(), "test-api-key"); - return nullptr; })); // destionation.server is empty, will use default one @@ -524,7 +520,7 @@ TEST_F(RequestHandlerImplTest, TestEmptyConfig) { })); // Check should NOT be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)).Times(0); + EXPECT_CALL(*mock_client_, Check(_, _, _)).Times(0); ::testing::NiceMock mock_report; EXPECT_CALL(mock_report, GetResponseHeaders()).Times(0); @@ -536,13 +532,64 @@ TEST_F(RequestHandlerImplTest, TestEmptyConfig) { Controller::PerRouteConfig config; auto handler = controller_->CreateRequestHandler(config); - handler->Check(&mock_check, &mock_header, nullptr, - [](const CheckResponseInfo &info) { - EXPECT_TRUE(info.response_status.ok()); - }); + handler->Check( + &mock_check, &mock_header, nullptr, + [](const CheckResponseInfo &info) { EXPECT_TRUE(info.status().ok()); }); handler->Report(&mock_check, &mock_report); } +TEST_F(OutboundRequestHandlerImplTest, TestLocalAttributes) { + ::testing::NiceMock mock_data; + ::testing::NiceMock mock_header; + // Check should be called. 
+ EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); + EXPECT_EQ(map["source.uid"].string_value(), + "kubernetes://src-client-84469dc8d7-jbbxt.default"); + })); + + ServiceConfig config; + Controller::PerRouteConfig per_route; + ApplyPerRouteConfig(config, &per_route); + auto handler = controller_->CreateRequestHandler(per_route); + handler->Check(&mock_data, &mock_header, nullptr, nullptr); +} + +TEST_F(OutboundRequestHandlerImplTest, TestLocalAttributesOverride) { + ::testing::NiceMock mock_data; + ::testing::NiceMock mock_header; + + EXPECT_CALL(mock_data, ExtractIstioAttributes(_)) + .WillOnce(Invoke([](std::string *data) -> bool { + Attributes fwd_attr; + (*fwd_attr.mutable_attributes())["source.uid"].set_string_value( + "fwded"); + (*fwd_attr.mutable_attributes())["destination.uid"].set_string_value( + "ignored"); + fwd_attr.SerializeToString(data); + return true; + })); + + // Check should be called. 
+ EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); + EXPECT_EQ(map["source.uid"].string_value(), "fwded"); + EXPECT_NE(map["destination.uid"].string_value(), "ignored"); + })); + + ServiceConfig config; + Controller::PerRouteConfig per_route; + ApplyPerRouteConfig(config, &per_route); + auto handler = controller_->CreateRequestHandler(per_route); + handler->Check(&mock_data, &mock_header, nullptr, nullptr); +} + } // namespace http } // namespace control } // namespace istio diff --git a/src/istio/control/http/service_context.cc b/src/istio/control/http/service_context.cc index dc1332db698..3882dffaa6a 100644 --- a/src/istio/control/http/service_context.cc +++ b/src/istio/control/http/service_context.cc @@ -51,15 +51,15 @@ void ServiceContext::BuildParsers() { } // Add static mixer attributes. -void ServiceContext::AddStaticAttributes(RequestContext *request) const { - client_context_->AddLocalNodeAttributes(request->attributes); +void ServiceContext::AddStaticAttributes( + ::istio::mixer::v1::Attributes *attributes) const { + client_context_->AddLocalNodeAttributes(attributes); if (client_context_->config().has_mixer_attributes()) { - request->attributes->MergeFrom( - client_context_->config().mixer_attributes()); + attributes->MergeFrom(client_context_->config().mixer_attributes()); } if (service_config_ && service_config_->has_mixer_attributes()) { - request->attributes->MergeFrom(service_config_->mixer_attributes()); + attributes->MergeFrom(service_config_->mixer_attributes()); } } @@ -82,8 +82,8 @@ void ServiceContext::InjectForwardedAttributes( } } -void ServiceContext::AddApiAttributes(CheckData *check_data, - RequestContext *request) const { +void ServiceContext::AddApiAttributes( + CheckData *check_data, ::istio::mixer::v1::Attributes *attributes) const { if (!api_spec_parser_) { return; 
} @@ -91,21 +91,22 @@ void ServiceContext::AddApiAttributes(CheckData *check_data, std::string path; if (check_data->FindHeaderByType(CheckData::HEADER_METHOD, &http_method) && check_data->FindHeaderByType(CheckData::HEADER_PATH, &path)) { - api_spec_parser_->AddAttributes(http_method, path, request->attributes); + api_spec_parser_->AddAttributes(http_method, path, attributes); } std::string api_key; if (api_spec_parser_->ExtractApiKey(check_data, &api_key)) { - (*request->attributes - ->mutable_attributes())[utils::AttributeName::kRequestApiKey] + (*attributes->mutable_attributes())[utils::AttributeName::kRequestApiKey] .set_string_value(api_key); } } // Add quota requirements from quota configs. -void ServiceContext::AddQuotas(RequestContext *request) const { +void ServiceContext::AddQuotas( + ::istio::mixer::v1::Attributes *attributes, + std::vector<::istio::quota_config::Requirement> "as) const { for (const auto &parser : quota_parsers_) { - parser->GetRequirements(*request->attributes, &request->quotas); + parser->GetRequirements(*attributes, "as); } } diff --git a/src/istio/control/http/service_context.h b/src/istio/control/http/service_context.h index ee446fe2f46..78e9b52f7d9 100644 --- a/src/istio/control/http/service_context.h +++ b/src/istio/control/http/service_context.h @@ -38,16 +38,18 @@ class ServiceContext { } // Add static mixer attributes. - void AddStaticAttributes(RequestContext* request) const; + void AddStaticAttributes(::istio::mixer::v1::Attributes* attributes) const; // Inject a header that contains the static forwarded attributes. void InjectForwardedAttributes(HeaderUpdate* header_update) const; // Add api attributes from api_spec. - void AddApiAttributes(CheckData* check_data, RequestContext* request) const; + void AddApiAttributes(CheckData* check_data, + ::istio::mixer::v1::Attributes* attributes) const; // Add quota requirements from quota configs. 
- void AddQuotas(RequestContext* request) const; + void AddQuotas(::istio::mixer::v1::Attributes* attributes, + std::vector<::istio::quota_config::Requirement>& quotas) const; bool enable_mixer_check() const { return service_config_ && !service_config_->disable_check_calls(); diff --git a/src/istio/control/mock_mixer_client.h b/src/istio/control/mock_mixer_client.h index b7367271370..93e54a0c74d 100644 --- a/src/istio/control/mock_mixer_client.h +++ b/src/istio/control/mock_mixer_client.h @@ -25,13 +25,13 @@ namespace control { // The mock object for MixerClient interface. class MockMixerClient : public ::istio::mixerclient::MixerClient { public: - MOCK_METHOD4( - Check, ::istio::mixerclient::CancelFunc( - const ::istio::mixer::v1::Attributes& attributes, - const std::vector<::istio::quota_config::Requirement>& quotas, - ::istio::mixerclient::TransportCheckFunc transport, - ::istio::mixerclient::CheckDoneFunc on_done)); - MOCK_METHOD1(Report, void(const ::istio::mixer::v1::Attributes& attributes)); + MOCK_METHOD3(Check, + void(::istio::mixerclient::CheckContextSharedPtr& context, + const ::istio::mixerclient::TransportCheckFunc& transport, + const ::istio::mixerclient::CheckDoneFunc& on_done)); + MOCK_METHOD1( + Report, + void(const istio::mixerclient::SharedAttributesSharedPtr& attributes)); MOCK_CONST_METHOD1(GetStatistics, void(::istio::mixerclient::Statistics* stat)); }; diff --git a/src/istio/control/request_context.h b/src/istio/control/request_context.h deleted file mode 100644 index 5e655abbba0..00000000000 --- a/src/istio/control/request_context.h +++ /dev/null @@ -1,49 +0,0 @@ -/* Copyright 2017 Istio Authors. All Rights Reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#ifndef ISTIO_CONTROL_REQUEST_CONTEXT_H -#define ISTIO_CONTROL_REQUEST_CONTEXT_H - -#include "google/protobuf/arena.h" -#include "google/protobuf/stubs/status.h" -#include "include/istio/quota_config/requirement.h" -#include "mixer/v1/attributes.pb.h" - -#include - -namespace istio { -namespace control { - -// The context to hold request data for both HTTP and TCP. -struct RequestContext { - RequestContext() { - attributes = - google::protobuf::Arena::CreateMessage<::istio::mixer::v1::Attributes>( - &arena_); - } - // protobuf arena - google::protobuf::Arena arena_; - // The attributes for both Check and Report. - ::istio::mixer::v1::Attributes* attributes; - // The quota requirements - std::vector<::istio::quota_config::Requirement> quotas; - // The check status. 
- ::google::protobuf::util::Status check_status; -}; - -} // namespace control -} // namespace istio - -#endif // ISTIO_CONTROL_REQUEST_CONTEXT_H diff --git a/src/istio/control/tcp/attributes_builder.cc b/src/istio/control/tcp/attributes_builder.cc index 0c8876e7923..782d0bdf6d1 100644 --- a/src/istio/control/tcp/attributes_builder.cc +++ b/src/istio/control/tcp/attributes_builder.cc @@ -14,10 +14,10 @@ */ #include "src/istio/control/tcp/attributes_builder.h" -#include "src/istio/utils/utils.h" - +#include "google/protobuf/stubs/status.h" #include "include/istio/utils/attribute_names.h" #include "include/istio/utils/attributes_builder.h" +#include "src/istio/utils/utils.h" namespace istio { namespace control { @@ -30,7 +30,7 @@ const std::string kConnectionClose("close"); } // namespace void AttributesBuilder::ExtractCheckAttributes(CheckData *check_data) { - utils::AttributesBuilder builder(request_->attributes); + utils::AttributesBuilder builder(attributes_); std::string source_ip; int source_port; @@ -81,9 +81,10 @@ void AttributesBuilder::ExtractCheckAttributes(CheckData *check_data) { } void AttributesBuilder::ExtractReportAttributes( - ReportData *report_data, ReportData::ConnectionEvent event, + const ::google::protobuf::util::Status &status, ReportData *report_data, + ReportData::ConnectionEvent event, ReportData::ReportInfo *last_report_info) { - utils::AttributesBuilder builder(request_->attributes); + utils::AttributesBuilder builder(attributes_); ReportData::ReportInfo info; report_data->GetReportInfo(&info); @@ -99,11 +100,11 @@ void AttributesBuilder::ExtractReportAttributes( if (event == ReportData::ConnectionEvent::CLOSE) { builder.AddDuration(utils::AttributeName::kConnectionDuration, info.duration); - if (!request_->check_status.ok()) { + if (status != ::google::protobuf::util::Status::UNKNOWN && !status.ok()) { builder.AddInt64(utils::AttributeName::kCheckErrorCode, - request_->check_status.error_code()); + status.error_code()); 
builder.AddString(utils::AttributeName::kCheckErrorMessage, - request_->check_status.ToString()); + status.ToString()); } builder.AddString(utils::AttributeName::kConnectionEvent, kConnectionClose); } else if (event == ReportData::ConnectionEvent::OPEN) { diff --git a/src/istio/control/tcp/attributes_builder.h b/src/istio/control/tcp/attributes_builder.h index 9b0f8004230..e69a7498a6d 100644 --- a/src/istio/control/tcp/attributes_builder.h +++ b/src/istio/control/tcp/attributes_builder.h @@ -18,7 +18,7 @@ #include "include/istio/control/tcp/check_data.h" #include "include/istio/control/tcp/report_data.h" -#include "src/istio/control/request_context.h" +#include "mixer/v1/attributes.pb.h" namespace istio { namespace control { @@ -27,18 +27,19 @@ namespace tcp { // The builder class to add TCP attributes. class AttributesBuilder { public: - AttributesBuilder(RequestContext* request) : request_(request) {} + AttributesBuilder(istio::mixer::v1::Attributes* attributes) + : attributes_(attributes) {} // Extract attributes for Check. void ExtractCheckAttributes(CheckData* check_data); // Extract attributes for Report. - void ExtractReportAttributes(ReportData* report_data, + void ExtractReportAttributes(const ::google::protobuf::util::Status& status, + ReportData* report_data, ReportData::ConnectionEvent event, ReportData::ReportInfo* last_report_info); private: - // The request context object. 
- RequestContext* request_; + istio::mixer::v1::Attributes* attributes_; }; } // namespace tcp diff --git a/src/istio/control/tcp/attributes_builder_test.cc b/src/istio/control/tcp/attributes_builder_test.cc index 897b7652d0e..5c9c4ab9119 100644 --- a/src/istio/control/tcp/attributes_builder_test.cc +++ b/src/istio/control/tcp/attributes_builder_test.cc @@ -14,7 +14,7 @@ */ #include "src/istio/control/tcp/attributes_builder.h" - +#include "google/protobuf/stubs/status.h" #include "google/protobuf/text_format.h" #include "google/protobuf/util/message_differencer.h" #include "gtest/gtest.h" @@ -419,9 +419,9 @@ attributes { } )"; -void ClearContextTime(RequestContext *request) { +void ClearContextTime(istio::mixer::v1::Attributes *attributes) { // Override timestamp with - - utils::AttributesBuilder builder(request->attributes); + utils::AttributesBuilder builder(attributes); std::chrono::time_point time0; builder.AddTimestamp(utils::AttributeName::kContextTime, time0); } @@ -452,21 +452,20 @@ TEST(AttributesBuilderTest, TestCheckAttributes) { *name = "www.google.com"; return true; })); - RequestContext request; - AttributesBuilder builder(&request); + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); builder.ExtractCheckAttributes(&mock_data); - ClearContextTime(&request); + ClearContextTime(&attributes); std::string out_str; - TextFormat::PrintToString(*request.attributes, &out_str); + TextFormat::PrintToString(attributes, &out_str); GOOGLE_LOG(INFO) << "===" << out_str << "==="; ::istio::mixer::v1::Attributes expected_attributes; ASSERT_TRUE( TextFormat::ParseFromString(kCheckAttributes, &expected_attributes)); - EXPECT_TRUE( - MessageDifferencer::Equals(*request.attributes, expected_attributes)); + EXPECT_TRUE(MessageDifferencer::Equals(attributes, expected_attributes)); } TEST(AttributesBuilderTest, TestReportAttributes) { @@ -487,6 +486,7 @@ TEST(AttributesBuilderTest, TestReportAttributes) { 
listval.mutable_list_value()->add_values()->set_string_value("c"); (*struct_obj.mutable_fields())["list"] = listval; filter_metadata["foo.bar.com"] = struct_obj; + filter_metadata["istio.mixer"] = struct_obj; // to be ignored EXPECT_CALL(mock_data, GetDestinationIpPort(_, _)) .Times(4) @@ -527,77 +527,80 @@ TEST(AttributesBuilderTest, TestReportAttributes) { info->duration = std::chrono::nanoseconds(4); })); - RequestContext request; - request.check_status = ::google::protobuf::util::Status( + istio::mixer::v1::Attributes attributes; + AttributesBuilder builder(&attributes); + auto check_status = ::google::protobuf::util::Status( ::google::protobuf::util::error::INVALID_ARGUMENT, "Invalid argument"); - AttributesBuilder builder(&request); ReportData::ReportInfo last_report_info{0ULL, 0ULL, std::chrono::nanoseconds::zero()}; // Verify first open report - builder.ExtractReportAttributes(&mock_data, ReportData::ConnectionEvent::OPEN, + builder.ExtractReportAttributes(check_status, &mock_data, + ReportData::ConnectionEvent::OPEN, &last_report_info); - ClearContextTime(&request); + ClearContextTime(&attributes); std::string out_str; - TextFormat::PrintToString(*request.attributes, &out_str); + TextFormat::PrintToString(attributes, &out_str); GOOGLE_LOG(INFO) << "===" << out_str << "==="; ::istio::mixer::v1::Attributes expected_open_attributes; ASSERT_TRUE(TextFormat::ParseFromString(kFirstReportAttributes, &expected_open_attributes)); - EXPECT_TRUE(MessageDifferencer::Equals(*request.attributes, - expected_open_attributes)); + EXPECT_TRUE(MessageDifferencer::Equals(attributes, expected_open_attributes)); EXPECT_EQ(0, last_report_info.received_bytes); EXPECT_EQ(0, last_report_info.send_bytes); // Verify delta one report - builder.ExtractReportAttributes( - &mock_data, ReportData::ConnectionEvent::CONTINUE, &last_report_info); - ClearContextTime(&request); + builder.ExtractReportAttributes(check_status, &mock_data, + ReportData::ConnectionEvent::CONTINUE, + 
&last_report_info); + ClearContextTime(&attributes); - TextFormat::PrintToString(*request.attributes, &out_str); + TextFormat::PrintToString(attributes, &out_str); GOOGLE_LOG(INFO) << "===" << out_str << "==="; ::istio::mixer::v1::Attributes expected_delta_attributes; ASSERT_TRUE(TextFormat::ParseFromString(kDeltaOneReportAttributes, &expected_delta_attributes)); - EXPECT_TRUE(MessageDifferencer::Equals(*request.attributes, - expected_delta_attributes)); + EXPECT_TRUE( + MessageDifferencer::Equals(attributes, expected_delta_attributes)); EXPECT_EQ(100, last_report_info.received_bytes); EXPECT_EQ(200, last_report_info.send_bytes); // Verify delta two report - builder.ExtractReportAttributes( - &mock_data, ReportData::ConnectionEvent::CONTINUE, &last_report_info); - ClearContextTime(&request); + builder.ExtractReportAttributes(check_status, &mock_data, + ReportData::ConnectionEvent::CONTINUE, + &last_report_info); + ClearContextTime(&attributes); out_str.clear(); - TextFormat::PrintToString(*request.attributes, &out_str); + TextFormat::PrintToString(attributes, &out_str); GOOGLE_LOG(INFO) << "===" << out_str << "==="; expected_delta_attributes.Clear(); ASSERT_TRUE(TextFormat::ParseFromString(kDeltaTwoReportAttributes, &expected_delta_attributes)); - EXPECT_TRUE(MessageDifferencer::Equals(*request.attributes, - expected_delta_attributes)); + EXPECT_TRUE( + MessageDifferencer::Equals(attributes, expected_delta_attributes)); EXPECT_EQ(201, last_report_info.received_bytes); EXPECT_EQ(404, last_report_info.send_bytes); // Verify final report - builder.ExtractReportAttributes( - &mock_data, ReportData::ConnectionEvent::CLOSE, &last_report_info); - ClearContextTime(&request); + builder.ExtractReportAttributes(check_status, &mock_data, + ReportData::ConnectionEvent::CLOSE, + &last_report_info); + ClearContextTime(&attributes); out_str.clear(); - TextFormat::PrintToString(*request.attributes, &out_str); + TextFormat::PrintToString(attributes, &out_str); GOOGLE_LOG(INFO) << 
"===" << out_str << "==="; ::istio::mixer::v1::Attributes expected_final_attributes; ASSERT_TRUE(TextFormat::ParseFromString(kReportAttributes, &expected_final_attributes)); - EXPECT_TRUE(MessageDifferencer::Equals(*request.attributes, - expected_final_attributes)); + EXPECT_TRUE( + MessageDifferencer::Equals(attributes, expected_final_attributes)); } } // namespace diff --git a/src/istio/control/tcp/client_context.h b/src/istio/control/tcp/client_context.h index 38072694aed..ea20dc64317 100644 --- a/src/istio/control/tcp/client_context.h +++ b/src/istio/control/tcp/client_context.h @@ -20,7 +20,6 @@ #include "include/istio/quota_config/config_parser.h" #include "include/istio/utils/local_attributes.h" #include "src/istio/control/client_context_base.h" -#include "src/istio/control/request_context.h" namespace istio { namespace control { @@ -51,18 +50,20 @@ class ClientContext : public ClientContextBase { } // Add static mixer attributes. - void AddStaticAttributes(RequestContext* request) const { - AddLocalNodeAttributes(request->attributes); + void AddStaticAttributes(::istio::mixer::v1::Attributes* attributes) const { + AddLocalNodeAttributes(attributes); if (config_.has_mixer_attributes()) { - request->attributes->MergeFrom(config_.mixer_attributes()); + attributes->MergeFrom(config_.mixer_attributes()); } } // Add quota requirements from quota configs. 
- void AddQuotas(RequestContext* request) const { + void AddQuotas( + ::istio::mixer::v1::Attributes* attributes, + std::vector<::istio::quota_config::Requirement>& quotas) const { if (quota_parser_) { - quota_parser_->GetRequirements(*request->attributes, &request->quotas); + quota_parser_->GetRequirements(*attributes, "as); } } diff --git a/src/istio/control/tcp/request_handler_impl.cc b/src/istio/control/tcp/request_handler_impl.cc index f9b7b25d59c..4abb3b70780 100644 --- a/src/istio/control/tcp/request_handler_impl.cc +++ b/src/istio/control/tcp/request_handler_impl.cc @@ -28,29 +28,45 @@ namespace tcp { RequestHandlerImpl::RequestHandlerImpl( std::shared_ptr client_context) - : client_context_(client_context), + : attributes_(new istio::mixerclient::SharedAttributes()), + check_context_(new istio::mixerclient::CheckContext( + client_context->Retries(), client_context->NetworkFailOpen(), + attributes_)), + client_context_(client_context), last_report_info_{0ULL, 0ULL, std::chrono::nanoseconds::zero()} {} -CancelFunc RequestHandlerImpl::Check(CheckData* check_data, - CheckDoneFunc on_done) { +void RequestHandlerImpl::Check(CheckData* check_data, + const CheckDoneFunc& on_done) { if (client_context_->enable_mixer_check() || client_context_->enable_mixer_report()) { - client_context_->AddStaticAttributes(&request_context_); + client_context_->AddStaticAttributes(attributes_->attributes()); - AttributesBuilder builder(&request_context_); + AttributesBuilder builder(attributes_->attributes()); builder.ExtractCheckAttributes(check_data); } if (!client_context_->enable_mixer_check()) { - CheckResponseInfo check_response_info; - check_response_info.response_status = Status::OK; - on_done(check_response_info); - return nullptr; + check_context_->setFinalStatus(Status::OK, false); + on_done(*check_context_); + return; } - client_context_->AddQuotas(&request_context_); + client_context_->AddQuotas(attributes_->attributes(), + check_context_->quotaRequirements()); + + 
client_context_->SendCheck(nullptr, on_done, check_context_); +} - return client_context_->SendCheck(nullptr, on_done, &request_context_); +void RequestHandlerImpl::ResetCancel() { + if (check_context_) { + check_context_->resetCancel(); + } +} + +void RequestHandlerImpl::CancelCheck() { + if (check_context_) { + check_context_->cancel(); + } } void RequestHandlerImpl::Report(ReportData* report_data, @@ -59,10 +75,11 @@ void RequestHandlerImpl::Report(ReportData* report_data, return; } - AttributesBuilder builder(&request_context_); - builder.ExtractReportAttributes(report_data, event, &last_report_info_); + AttributesBuilder builder(attributes_->attributes()); + builder.ExtractReportAttributes(check_context_->status(), report_data, event, + &last_report_info_); - client_context_->SendReport(request_context_); + client_context_->SendReport(attributes_); } } // namespace tcp diff --git a/src/istio/control/tcp/request_handler_impl.h b/src/istio/control/tcp/request_handler_impl.h index ba7f890bd2c..c702e6a18e2 100644 --- a/src/istio/control/tcp/request_handler_impl.h +++ b/src/istio/control/tcp/request_handler_impl.h @@ -17,8 +17,8 @@ #define ISTIO_CONTROL_TCP_REQUEST_HANDLER_IMPL_H #include "include/istio/control/tcp/request_handler.h" -#include "src/istio/control/request_context.h" #include "src/istio/control/tcp/client_context.h" +#include "src/istio/mixerclient/check_context.h" namespace istio { namespace control { @@ -30,17 +30,22 @@ class RequestHandlerImpl : public RequestHandler { RequestHandlerImpl(std::shared_ptr client_context); // Make a Check call. - ::istio::mixerclient::CancelFunc Check( - CheckData* check_data, - ::istio::mixerclient::CheckDoneFunc on_done) override; + void Check(CheckData* check_data, + const ::istio::mixerclient::CheckDoneFunc& on_done) override; + + void ResetCancel() override; + + void CancelCheck() override; // Make a Report call. 
void Report(ReportData* report_data, ReportData::ConnectionEvent event) override; private: - // The request context object. - RequestContext request_context_; + // memory for telemetry reports and policy checks. Telemetry only needs the + // shared attributes. + istio::mixerclient::SharedAttributesSharedPtr attributes_; + istio::mixerclient::CheckContextSharedPtr check_context_; // The client context object. std::shared_ptr client_context_; diff --git a/src/istio/control/tcp/request_handler_impl_test.cc b/src/istio/control/tcp/request_handler_impl_test.cc index c27763b3bc4..9c882e166e7 100644 --- a/src/istio/control/tcp/request_handler_impl_test.cc +++ b/src/istio/control/tcp/request_handler_impl_test.cc @@ -26,6 +26,7 @@ using ::google::protobuf::util::Status; using ::istio::mixer::v1::Attributes; using ::istio::mixer::v1::config::client::TcpClientConfig; using ::istio::mixerclient::CancelFunc; +using ::istio::mixerclient::CheckContextSharedPtr; using ::istio::mixerclient::CheckDoneFunc; using ::istio::mixerclient::CheckResponseInfo; using ::istio::mixerclient::DoneFunc; @@ -108,12 +109,12 @@ TEST_F(RequestHandlerImplTest, TestHandlerDisabledCheck) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should not be called. - EXPECT_CALL(*mock_client_, Check(_, _, _, _)).Times(0); + EXPECT_CALL(*mock_client_, Check(_, _, _)).Times(0); client_config_.set_disable_check_calls(true); auto handler = controller_->CreateRequestHandler(); handler->Check(&mock_data, [](const CheckResponseInfo &info) { - EXPECT_TRUE(info.response_status.ok()); + EXPECT_TRUE(info.status().ok()); }); } @@ -123,17 +124,15 @@ TEST_F(RequestHandlerImplTest, TestHandlerCheck) { EXPECT_CALL(mock_data, GetPrincipal(_, _)).Times(2); // Check should be called. 
- EXPECT_CALL(*mock_client_, Check(_, _, _, _)) - .WillOnce(Invoke([](const Attributes &attributes, - const std::vector "as, - TransportCheckFunc transport, - CheckDoneFunc on_done) -> CancelFunc { - auto map = attributes.attributes(); + EXPECT_CALL(*mock_client_, Check(_, _, _)) + .WillOnce(Invoke([](CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + auto map = context->attributes()->attributes(); EXPECT_EQ(map["key1"].string_value(), "value1"); - EXPECT_EQ(quotas.size(), 1); - EXPECT_EQ(quotas[0].quota, "quota"); - EXPECT_EQ(quotas[0].charge, 5); - return nullptr; + EXPECT_EQ(context->quotaRequirements().size(), 1); + EXPECT_EQ(context->quotaRequirements()[0].quota, "quota"); + EXPECT_EQ(context->quotaRequirements()[0].charge, 5); })); auto handler = controller_->CreateRequestHandler(); diff --git a/src/istio/mixerclient/BUILD b/src/istio/mixerclient/BUILD index 6d5d8010b38..ca6e190af38 100644 --- a/src/istio/mixerclient/BUILD +++ b/src/istio/mixerclient/BUILD @@ -40,6 +40,7 @@ cc_library( "attribute_compressor.h", "check_cache.cc", "check_cache.h", + "check_context.h", "client_impl.cc", "client_impl.h", "global_dictionary.cc", @@ -50,6 +51,9 @@ cc_library( "referenced.h", "report_batch.cc", "report_batch.h", + "shared_attributes.h", + "status_util.cc", + "status_util.h", ], visibility = ["//visibility:public"], deps = [ @@ -59,6 +63,7 @@ cc_library( "//include/istio/utils:simple_lru_cache", "//src/istio/prefetch:quota_prefetch_lib", "//src/istio/utils:utils_lib", + "@com_google_absl//absl/strings", ], ) diff --git a/src/istio/mixerclient/attribute_compressor.cc b/src/istio/mixerclient/attribute_compressor.cc index 34c980ae35e..ec7a6af8353 100644 --- a/src/istio/mixerclient/attribute_compressor.cc +++ b/src/istio/mixerclient/attribute_compressor.cc @@ -140,6 +140,9 @@ class BatchCompressorImpl : public BatchCompressor { report_.add_default_words(word); } report_.set_global_word_count(global_dict_.size()); + 
report_.set_repeated_attributes_semantics( + mixer::v1:: + ReportRequest_RepeatedAttributesSemantics_INDEPENDENT_ENCODING); return report_; } diff --git a/src/istio/mixerclient/attribute_compressor_test.cc b/src/istio/mixerclient/attribute_compressor_test.cc index 614381bb16c..397905b0099 100644 --- a/src/istio/mixerclient/attribute_compressor_test.cc +++ b/src/istio/mixerclient/attribute_compressor_test.cc @@ -259,6 +259,7 @@ attributes { } default_words: "JWT-Token" global_word_count: 221 +repeated_attributes_semantics: INDEPENDENT_ENCODING )"; class AttributeCompressorTest : public ::testing::Test { diff --git a/src/istio/mixerclient/check_cache.cc b/src/istio/mixerclient/check_cache.cc index 2f10557ce2a..18a2cad2879 100644 --- a/src/istio/mixerclient/check_cache.cc +++ b/src/istio/mixerclient/check_cache.cc @@ -15,6 +15,7 @@ #include "src/istio/mixerclient/check_cache.h" #include "include/istio/utils/protobuf.h" +#include "src/istio/utils/logger.h" using namespace std::chrono; using ::google::protobuf::util::Status; @@ -147,9 +148,10 @@ Status CheckCache::CacheResponse(const Attributes &attributes, } utils::HashType signature; if (!referenced.Signature(attributes, "", &signature)) { - GOOGLE_LOG(ERROR) << "Response referenced mismatchs with request"; - GOOGLE_LOG(ERROR) << "Request attributes: " << attributes.DebugString(); - GOOGLE_LOG(ERROR) << "Referenced attributes: " << referenced.DebugString(); + MIXER_WARN( + "Response referenced does not match request. Request attributes: " + "%s. 
Referenced attributes: %s", + attributes.DebugString().c_str(), referenced.DebugString().c_str()); return ConvertRpcStatus(response.precondition().status()); } @@ -157,8 +159,8 @@ Status CheckCache::CacheResponse(const Attributes &attributes, utils::HashType hash = referenced.Hash(); if (referenced_map_.find(hash) == referenced_map_.end()) { referenced_map_[hash] = referenced; - GOOGLE_LOG(INFO) << "Add a new Referenced for check cache: " - << referenced.DebugString(); + MIXER_DEBUG("Add a new Referenced for check cache: %s", + referenced.DebugString().c_str()); } CheckLRUCache::ScopedLookup lookup(cache_.get(), signature); diff --git a/src/istio/mixerclient/check_cache.h b/src/istio/mixerclient/check_cache.h index e779b5ae47e..ad14f85eddf 100644 --- a/src/istio/mixerclient/check_cache.h +++ b/src/istio/mixerclient/check_cache.h @@ -25,7 +25,6 @@ #include #include "google/protobuf/stubs/status.h" -#include "include/istio/mixerclient/client.h" #include "include/istio/mixerclient/options.h" #include "include/istio/utils/simple_lru_cache.h" #include "include/istio/utils/simple_lru_cache_inl.h" @@ -54,9 +53,9 @@ class CheckCache { bool IsCacheHit() const; - ::google::protobuf::util::Status status() const { return status_; } + const ::google::protobuf::util::Status& status() const { return status_; } - ::istio::mixer::v1::RouteDirective route_directive() const { + const ::istio::mixer::v1::RouteDirective& route_directive() const { return route_directive_; } diff --git a/src/istio/mixerclient/check_context.h b/src/istio/mixerclient/check_context.h new file mode 100644 index 00000000000..38e36de0f14 --- /dev/null +++ b/src/istio/mixerclient/check_context.h @@ -0,0 +1,242 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "google/protobuf/arena.h" +#include "google/protobuf/stubs/status.h" +#include "include/istio/mixerclient/check_response.h" +#include "include/istio/mixerclient/environment.h" +#include "include/istio/quota_config/requirement.h" +#include "include/istio/utils/attribute_names.h" +#include "include/istio/utils/attributes_builder.h" +#include "mixer/v1/attributes.pb.h" +#include "mixer/v1/mixer.pb.h" +#include "src/istio/mixerclient/attribute_compressor.h" +#include "src/istio/mixerclient/check_cache.h" +#include "src/istio/mixerclient/quota_cache.h" +#include "src/istio/mixerclient/shared_attributes.h" +#include "src/istio/utils/logger.h" + +#include + +namespace istio { +namespace mixerclient { + +/** + * All memory for the upstream policy and quota checks should hang off of these + * objects. 
+ */ +class CheckContext : public CheckResponseInfo { + public: + CheckContext(uint32_t retries, bool fail_open, + SharedAttributesSharedPtr& shared_attributes) + : shared_attributes_(shared_attributes), + fail_open_(fail_open), + max_retries_(retries) {} + + const istio::mixer::v1::Attributes* attributes() const { + return shared_attributes_->attributes(); + } + + const std::vector& quotaRequirements() + const { + return quota_requirements_; + } + std::vector& quotaRequirements() { + return quota_requirements_; + } + + // + // Policy Cache Checks + // + + bool policyCacheHit() const { return policy_cache_hit_; } + const google::protobuf::util::Status& policyStatus() const { + return policy_cache_result_.status(); + } + + void checkPolicyCache(CheckCache& policyCache) { + policyCache.Check(*shared_attributes_->attributes(), &policy_cache_result_); + policy_cache_hit_ = policy_cache_result_.IsCacheHit(); + } + + void updatePolicyCache(const google::protobuf::util::Status& status, + const istio::mixer::v1::CheckResponse& response) { + policy_cache_result_.SetResponse(status, *shared_attributes_->attributes(), + response); + } + + // + // Quota Cache Checks + // + + bool quotaCheckRequired() const { return !quota_requirements_.empty(); } + bool remoteQuotaRequestRequired() const { + return remote_quota_check_required_; + } + + void checkQuotaCache(QuotaCache& quotaCache) { + if (!quotaCheckRequired()) { + return; + } + + // + // Quota is removed from the quota cache iff there is a policy cache hit. If + // there is a policy cache miss, then a request has to be sent upstream + // anyways, so the quota will be decremented on the upstream response. 
+ // + quotaCache.Check(*shared_attributes_->attributes(), quota_requirements_, + policyCacheHit(), "a_cache_result_); + + remote_quota_check_required_ = + quota_cache_result_.BuildRequest(allocRequestOnce()); + + quota_cache_hit_ = quota_cache_result_.IsCacheHit(); + } + + void updateQuotaCache(const google::protobuf::util::Status& status, + const istio::mixer::v1::CheckResponse& response) { + quota_cache_result_.SetResponse(status, *shared_attributes_->attributes(), + response); + } + + bool quotaCacheHit() const { return quota_cache_hit_; } + const google::protobuf::util::Status& quotaStatus() const { + return quota_cache_result_.status(); + } + + // + // Upstream request and response + // + + void compressRequest(const AttributeCompressor& compressor, + const std::string& deduplication_id) { + compressor.Compress(*shared_attributes_->attributes(), + allocRequestOnce()->mutable_attributes()); + request_->set_global_word_count(compressor.global_word_count()); + request_->set_deduplication_id(deduplication_id); + } + + bool networkFailOpen() const { return fail_open_; } + + const istio::mixer::v1::CheckRequest& request() { return *request_; } + + istio::mixer::v1::CheckResponse* response() { + if (!response_) { + response_ = google::protobuf::Arena::CreateMessage< + istio::mixer::v1::CheckResponse>(&shared_attributes_->arena()); + } + return response_; + } + + void setFinalStatus(const google::protobuf::util::Status& status, + bool add_report_attributes = true) { + if (add_report_attributes) { + utils::AttributesBuilder builder(shared_attributes_->attributes()); + builder.AddBool(utils::AttributeName::kCheckCacheHit, policy_cache_hit_); + builder.AddBool(utils::AttributeName::kQuotaCacheHit, quota_cache_hit_); + } + + final_status_ = status; + } + + // + // Policy gRPC request attempt, retry, and cancellation + // + + bool retryable() const { return retry_attempts_ < max_retries_; } + + uint32_t retryAttempt() const { return retry_attempts_; } + + void 
retry(uint32_t retry_ms, std::unique_ptr timer) { + retry_attempts_++; + retry_timer_ = std::move(timer); + retry_timer_->Start(retry_ms); + } + + void cancel() { + if (cancel_func_) { + MIXER_DEBUG("Cancelling check call"); + cancel_func_(); + cancel_func_ = nullptr; + } + + if (retry_timer_) { + MIXER_DEBUG("Cancelling retry"); + retry_timer_->Stop(); + retry_timer_ = nullptr; + } + } + + void setCancel(CancelFunc cancel_func) { cancel_func_ = cancel_func; } + + void resetCancel() { cancel_func_ = nullptr; } + + // + // CheckResponseInfo (exposed to the top-level filter) + // + + const google::protobuf::util::Status& status() const override { + return final_status_; + } + + const istio::mixer::v1::RouteDirective& routeDirective() const override { + return policy_cache_result_.route_directive(); + } + + private: + CheckContext(const CheckContext&) = delete; + void operator=(const CheckContext&) = delete; + + istio::mixer::v1::CheckRequest* allocRequestOnce() { + if (!request_) { + request_ = google::protobuf::Arena::CreateMessage< + istio::mixer::v1::CheckRequest>(&shared_attributes_->arena()); + } + + return request_; + } + + istio::mixerclient::SharedAttributesSharedPtr shared_attributes_; + std::vector quota_requirements_; + + bool quota_cache_hit_{false}; + bool policy_cache_hit_{false}; + + QuotaCache::CheckResult quota_cache_result_; + CheckCache::CheckResult policy_cache_result_; + + istio::mixer::v1::CheckRequest* request_{nullptr}; + istio::mixer::v1::CheckResponse* response_{nullptr}; + + bool fail_open_{false}; + bool remote_quota_check_required_{false}; + google::protobuf::util::Status final_status_{ + google::protobuf::util::Status::UNKNOWN}; + const uint32_t max_retries_; + uint32_t retry_attempts_{0}; + + // Calling this will cancel any currently outstanding gRPC request to mixer + // policy server. 
+ CancelFunc cancel_func_{nullptr}; + + std::unique_ptr retry_timer_{nullptr}; +}; + +typedef std::shared_ptr CheckContextSharedPtr; + +} // namespace mixerclient +} // namespace istio diff --git a/src/istio/mixerclient/client_impl.cc b/src/istio/mixerclient/client_impl.cc index 2a563039f41..966b09b679c 100644 --- a/src/istio/mixerclient/client_impl.cc +++ b/src/istio/mixerclient/client_impl.cc @@ -14,8 +14,12 @@ */ #include "src/istio/mixerclient/client_impl.h" #include +#include +#include #include "include/istio/mixerclient/check_response.h" #include "include/istio/utils/protobuf.h" +#include "src/istio/mixerclient/status_util.h" +#include "src/istio/utils/logger.h" using ::google::protobuf::util::Status; using ::google::protobuf::util::error::Code; @@ -24,149 +28,316 @@ using ::istio::mixer::v1::CheckRequest; using ::istio::mixer::v1::CheckResponse; using ::istio::mixer::v1::ReportRequest; using ::istio::mixer::v1::ReportResponse; +using ::istio::mixerclient::CheckContextSharedPtr; +using ::istio::mixerclient::SharedAttributesSharedPtr; namespace istio { namespace mixerclient { MixerClientImpl::MixerClientImpl(const MixerClientOptions &options) : options_(options) { + timer_create_ = options.env.timer_create_func; check_cache_ = std::unique_ptr(new CheckCache(options.check_options)); report_batch_ = std::unique_ptr( new ReportBatch(options.report_options, options_.env.report_transport, - options.env.timer_create_func, compressor_)); + timer_create_, compressor_)); quota_cache_ = std::unique_ptr(new QuotaCache(options.quota_options)); if (options_.env.uuid_generate_func) { deduplication_id_base_ = options_.env.uuid_generate_func(); } - - total_check_calls_ = 0; - total_remote_check_calls_ = 0; - total_blocking_remote_check_calls_ = 0; - total_quota_calls_ = 0; - total_remote_quota_calls_ = 0; - total_blocking_remote_quota_calls_ = 0; } MixerClientImpl::~MixerClientImpl() {} -CancelFunc MixerClientImpl::Check( - const Attributes &attributes, - const 
std::vector<::istio::quota_config::Requirement> &quotas, - TransportCheckFunc transport, CheckDoneFunc on_done) { +uint32_t MixerClientImpl::RetryDelay(uint32_t retry_attempt) { + const uint32_t max_retry_ms = + std::min(options_.check_options.max_retry_ms, + options_.check_options.base_retry_ms * + static_cast(std::pow(2, retry_attempt))); + + std::uniform_int_distribution distribution( + options_.check_options.base_retry_ms, max_retry_ms); + + return distribution(rand_); +} + +void MixerClientImpl::Check(CheckContextSharedPtr &context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + // + // Always check the policy cache + // + + context->checkPolicyCache(*check_cache_); ++total_check_calls_; - std::unique_ptr check_result( - new CheckCache::CheckResult); - check_cache_->Check(attributes, check_result.get()); + MIXER_DEBUG("Policy cache hit=%s, status=%s", + context->policyCacheHit() ? "true" : "false", + context->policyStatus().ToString().c_str()); - CheckResponseInfo check_response_info; - check_response_info.is_check_cache_hit = check_result->IsCacheHit(); - check_response_info.response_status = check_result->status(); - check_response_info.route_directive = check_result->route_directive(); + if (context->policyCacheHit()) { + ++total_check_cache_hits_; - if (check_result->IsCacheHit() && !check_result->status().ok()) { - on_done(check_response_info); - return nullptr; + if (!context->policyStatus().ok()) { + // + // If the policy cache denies the request, immediately fail the request + // + ++total_check_cache_hit_denies_; + context->setFinalStatus(context->policyStatus()); + on_done(*context); + return; + } + + // + // If policy cache accepts the request and a quota check is not required, + // immediately accept the request.
+ // + ++total_check_cache_hit_accepts_; + if (!context->quotaCheckRequired()) { + context->setFinalStatus(context->policyStatus()); + on_done(*context); + return; + } + } else { + ++total_check_cache_misses_; } - if (!quotas.empty()) { + bool remote_quota_prefetch{false}; + + if (context->quotaCheckRequired()) { + context->checkQuotaCache(*quota_cache_); ++total_quota_calls_; - } - std::unique_ptr quota_result( - new QuotaCache::CheckResult); - // Only use quota cache if Check is using cache with OK status. - // Otherwise, a remote Check call may be rejected, but quota amounts were - // substracted from quota cache already. - quota_cache_->Check(attributes, quotas, check_result->IsCacheHit(), - quota_result.get()); - - auto arena = new google::protobuf::Arena; - CheckRequest *request = - google::protobuf::Arena::CreateMessage(arena); - bool quota_call = quota_result->BuildRequest(request); - check_response_info.is_quota_cache_hit = quota_result->IsCacheHit(); - check_response_info.response_status = quota_result->status(); - if (check_result->IsCacheHit() && quota_result->IsCacheHit()) { - on_done(check_response_info); - on_done = nullptr; - if (!quota_call) { - delete arena; - return nullptr; + + MIXER_DEBUG("Quota cache hit=%s, status=%s, remote_call=%s", + context->quotaCacheHit() ? "true" : "false", + context->quotaStatus().ToString().c_str(), + context->remoteQuotaRequestRequired() ? "true" : "false"); + + if (context->quotaCacheHit()) { + ++total_quota_cache_hits_; + if (context->quotaStatus().ok()) { + ++total_quota_cache_hit_accepts_; + } else { + ++total_quota_cache_hit_denies_; + } + + if (context->policyCacheHit()) { + // + // If both policy and quota caches are hit, we can call the completion + // handler now. However sometimes the quota cache's prefetch + // implementation will still need to send a request to the Mixer server + // in the background. 
+ // + context->setFinalStatus(context->quotaStatus()); + on_done(*context); + remote_quota_prefetch = context->remoteQuotaRequestRequired(); + if (!remote_quota_prefetch) { + return; + } + } + } else { + ++total_quota_cache_misses_; } } - compressor_.Compress(attributes, request->mutable_attributes()); - request->set_global_word_count(compressor_.global_word_count()); - request->set_deduplication_id(deduplication_id_base_ + - std::to_string(deduplication_id_.fetch_add(1))); - - // Need to make a copy for processing the response for check cache. - Attributes *attributes_copy = - google::protobuf::Arena::CreateMessage(arena); - CheckResponse *response = - google::protobuf::Arena::CreateMessage(arena); - *attributes_copy = attributes; - // Lambda capture could not pass unique_ptr, use raw pointer. - CheckCache::CheckResult *raw_check_result = check_result.release(); - QuotaCache::CheckResult *raw_quota_result = quota_result.release(); - if (!transport) { - transport = options_.env.check_transport; + // TODO(jblatt) mjog thinks this is a big CPU hog. Look into it. + context->compressRequest( + compressor_, + deduplication_id_base_ + std::to_string(deduplication_id_.fetch_add(1))); + + // + // Classify and track reason for remote request + // + + ++total_remote_calls_; + + if (!context->policyCacheHit()) { + ++total_remote_check_calls_; } - // We are going to make a remote call now. 
- ++total_remote_check_calls_; - if (!quotas.empty()) { + + if (context->remoteQuotaRequestRequired()) { ++total_remote_quota_calls_; } - if (on_done) { - ++total_blocking_remote_check_calls_; - if (!quotas.empty()) { - ++total_blocking_remote_quota_calls_; - } + + if (remote_quota_prefetch) { + ++total_remote_quota_prefetch_calls_; } - return transport( - *request, response, - [this, attributes_copy, response, raw_check_result, raw_quota_result, - on_done, arena](const Status &status) { - raw_check_result->SetResponse(status, *attributes_copy, *response); - raw_quota_result->SetResponse(status, *attributes_copy, *response); - CheckResponseInfo check_response_info; - if (on_done) { - if (!raw_check_result->status().ok()) { - check_response_info.response_status = raw_check_result->status(); + RemoteCheck(context, transport ? transport : options_.env.check_transport, + remote_quota_prefetch ? nullptr : on_done); +} + +void MixerClientImpl::RemoteCheck(CheckContextSharedPtr context, + const TransportCheckFunc &transport, + const CheckDoneFunc &on_done) { + // + // This lambda and any lambdas it creates for retry will inc the ref count + // on the CheckContext shared pointer. + // + // The CheckDoneFunc is valid as long as the Filter object is valid. This + // has a lifespan similar to the CheckContext, but TODO(jblatt) it would be + // good to move this into the CheckContext anyways. + // + // The other captures (this/MixerClientImpl and TransportCheckFunc's + // references) have lifespans much greater than any individual transaction. 
+ // + CancelFunc cancel_func = transport( + context->request(), context->response(), + [this, context, transport, on_done](const Status &status) { + context->resetCancel(); + + // + // Classify and track transport errors + // + + TransportResult result = TransportStatus(status); + + switch (result) { + case TransportResult::SUCCESS: + ++total_remote_call_successes_; + break; + case TransportResult::RESPONSE_TIMEOUT: + ++total_remote_call_timeouts_; + break; + case TransportResult::SEND_ERROR: + ++total_remote_call_send_errors_; + break; + case TransportResult::OTHER: + ++total_remote_call_other_errors_; + break; + } + + if (result != TransportResult::SUCCESS && context->retryable()) { + ++total_remote_call_retries_; + const uint32_t retry_ms = RetryDelay(context->retryAttempt()); + + MIXER_DEBUG("Retry %u in %u msec due to transport error=%s", + context->retryAttempt() + 1, retry_ms, + status.ToString().c_str()); + + context->retry(retry_ms, + timer_create_([this, context, transport, on_done]() { + RemoteCheck(context, transport, on_done); + })); + + return; + } + + // + // Update caches. 
This has the side-effect of updating + // status, so track those too + // + + if (!context->policyCacheHit()) { + context->updatePolicyCache(status, *context->response()); + + if (context->policyStatus().ok()) { + ++total_remote_check_accepts_; + } else { + ++total_remote_check_denies_; + } + } + + if (context->quotaCheckRequired()) { + context->updateQuotaCache(status, *context->response()); + + if (context->quotaStatus().ok()) { + ++total_remote_quota_accepts_; } else { - check_response_info.response_status = raw_quota_result->status(); + ++total_remote_quota_denies_; } - check_response_info.route_directive = - raw_check_result->route_directive(); - on_done(check_response_info); } - delete raw_check_result; - delete raw_quota_result; - delete arena; + + MIXER_DEBUG( + "CheckResult transport=%s, policy=%s, quota=%s, attempt=%u", + status.ToString().c_str(), + result == TransportResult::SUCCESS + ? context->policyStatus().ToString().c_str() + : "NA", + result == TransportResult::SUCCESS && context->quotaCheckRequired() + ? context->policyStatus().ToString().c_str() + : "NA", + context->retryAttempt()); + + // + // Determine final status for Filter::completeCheck(). 
This + // will send an error response to the downstream client if + // the final status is not Status::OK + // + + if (result != TransportResult::SUCCESS) { + if (context->networkFailOpen()) { + context->setFinalStatus(Status::OK); + } else { + context->setFinalStatus(status); + } + } else if (!context->quotaCheckRequired()) { + context->setFinalStatus(context->policyStatus()); + } else if (!context->policyStatus().ok()) { + context->setFinalStatus(context->policyStatus()); + } else { + context->setFinalStatus(context->quotaStatus()); + } + + if (on_done) { + on_done(*context); + } if (utils::InvalidDictionaryStatus(status)) { + // TODO(jblatt) verify this is threadsafe compressor_.ShrinkGlobalDictionary(); } }); + + context->setCancel([this, cancel_func]() { + ++total_remote_call_cancellations_; + cancel_func(); + }); } -void MixerClientImpl::Report(const Attributes &attributes) { +void MixerClientImpl::Report(const SharedAttributesSharedPtr &attributes) { report_batch_->Report(attributes); } void MixerClientImpl::GetStatistics(Statistics *stat) const { - stat->total_check_calls = total_check_calls_; - stat->total_remote_check_calls = total_remote_check_calls_; - stat->total_blocking_remote_check_calls = total_blocking_remote_check_calls_; - stat->total_quota_calls = total_quota_calls_; - stat->total_remote_quota_calls = total_remote_quota_calls_; - stat->total_blocking_remote_quota_calls = total_blocking_remote_quota_calls_; - stat->total_report_calls = report_batch_->total_report_calls(); - stat->total_remote_report_calls = report_batch_->total_remote_report_calls(); + stat->total_check_calls_ = total_check_calls_; + stat->total_check_cache_hits_ = total_check_cache_hits_; + stat->total_check_cache_misses_ = total_check_cache_misses_; + stat->total_check_cache_hit_accepts_ = total_check_cache_hit_accepts_; + stat->total_check_cache_hit_denies_ = total_check_cache_hit_denies_; + stat->total_remote_check_calls_ = total_remote_check_calls_; + 
stat->total_remote_check_accepts_ = total_remote_check_accepts_; + stat->total_remote_check_denies_ = total_remote_check_denies_; + stat->total_quota_calls_ = total_quota_calls_; + stat->total_quota_cache_hits_ = total_quota_cache_hits_; + stat->total_quota_cache_misses_ = total_quota_cache_misses_; + stat->total_quota_cache_hit_accepts_ = total_quota_cache_hit_accepts_; + stat->total_quota_cache_hit_denies_ = total_quota_cache_hit_denies_; + stat->total_remote_quota_calls_ = total_remote_quota_calls_; + stat->total_remote_quota_accepts_ = total_remote_quota_accepts_; + stat->total_remote_quota_denies_ = total_remote_quota_denies_; + stat->total_remote_quota_prefetch_calls_ = total_remote_quota_prefetch_calls_; + stat->total_remote_calls_ = total_remote_calls_; + stat->total_remote_call_successes_ = total_remote_call_successes_; + stat->total_remote_call_timeouts_ = total_remote_call_timeouts_; + stat->total_remote_call_send_errors_ = total_remote_call_send_errors_; + stat->total_remote_call_other_errors_ = total_remote_call_other_errors_; + stat->total_remote_call_retries_ = total_remote_call_retries_; + stat->total_remote_call_cancellations_ = total_remote_call_cancellations_; + + stat->total_report_calls_ = report_batch_->total_report_calls(); + stat->total_remote_report_calls_ = report_batch_->total_remote_report_calls(); + stat->total_remote_report_successes_ = + report_batch_->total_remote_report_successes(); + stat->total_remote_report_timeouts_ = + report_batch_->total_remote_report_timeouts(); + stat->total_remote_report_send_errors_ = + report_batch_->total_remote_report_send_errors(); + stat->total_remote_report_other_errors_ = + report_batch_->total_remote_report_other_errors(); } // Creates a MixerClient object. 
diff --git a/src/istio/mixerclient/client_impl.h b/src/istio/mixerclient/client_impl.h index d23ff15842e..4fa76032d06 100644 --- a/src/istio/mixerclient/client_impl.h +++ b/src/istio/mixerclient/client_impl.h @@ -23,6 +23,10 @@ #include "src/istio/mixerclient/report_batch.h" #include +#include + +using ::istio::mixerclient::CheckContextSharedPtr; +using ::istio::mixerclient::SharedAttributesSharedPtr; namespace istio { namespace mixerclient { @@ -35,21 +39,29 @@ class MixerClientImpl : public MixerClient { // Destructor virtual ~MixerClientImpl(); - CancelFunc Check( - const ::istio::mixer::v1::Attributes& attributes, - const std::vector<::istio::quota_config::Requirement>& quotas, - TransportCheckFunc transport, CheckDoneFunc on_done) override; - void Report(const ::istio::mixer::v1::Attributes& attributes) override; + void Check(CheckContextSharedPtr& context, + const TransportCheckFunc& transport, + const CheckDoneFunc& on_done) override; + + void Report(const SharedAttributesSharedPtr& attributes) override; void GetStatistics(Statistics* stat) const override; private: + void RemoteCheck(CheckContextSharedPtr context, + const TransportCheckFunc& transport, + const CheckDoneFunc& on_done); + + uint32_t RetryDelay(uint32_t retry_attempt); + // Store the options MixerClientOptions options_; // To compress attributes. AttributeCompressor compressor_; + // timer create func + TimerCreateFunc timer_create_; // Cache for Check call. std::unique_ptr check_cache_; // Report batch. @@ -57,21 +69,73 @@ class MixerClientImpl : public MixerClient { // Cache for Quota call. std::unique_ptr quota_cache_; + // RNG for retry jitter + std::default_random_engine rand_; + // for deduplication_id std::string deduplication_id_base_; std::atomic deduplication_id_; - // Atomic objects for recording statistics. - // check cache miss rate: - // total_blocking_remote_check_calls_ / total_check_calls_. 
- // quota cache miss rate: - // total_blocking_remote_quota_calls_ / total_quota_calls_. - std::atomic_int_fast64_t total_check_calls_; - std::atomic_int_fast64_t total_remote_check_calls_; - std::atomic_int_fast64_t total_blocking_remote_check_calls_; - std::atomic_int_fast64_t total_quota_calls_; - std::atomic_int_fast64_t total_remote_quota_calls_; - std::atomic_int_fast64_t total_blocking_remote_quota_calls_; + // + // Policy check counters. + // + // total_check_calls = total_check_hits + total_check_misses + // total_check_hits = total_check_hit_accepts + total_check_hit_denies + // total_remote_check_calls = total_check_misses + // total_remote_check_calls >= total_remote_check_accepts + + // total_remote_check_denies + // ^ Transport errors are responsible for the >= + // + + std::atomic total_check_calls_{0}; // 1.0 + std::atomic total_check_cache_hits_{0}; // 1.1 + std::atomic total_check_cache_misses_{0}; // 1.1 + std::atomic total_check_cache_hit_accepts_{0}; // 1.1 + std::atomic total_check_cache_hit_denies_{0}; // 1.1 + std::atomic total_remote_check_calls_{0}; // 1.0 + std::atomic total_remote_check_accepts_{0}; // 1.1 + std::atomic total_remote_check_denies_{0}; // 1.1 + + // + // Quota check counters + // + // total_quota_calls = total_quota_hits + total_quota_misses + // total_quota_hits >= total_quota_hit_accepts + total_quota_hit_denies + // ^ we will neither accept or deny from the quota cache if the policy + // cache is missed + // total_remote_quota_calls = total_quota_misses + total_quota_hit_denies + // ^ we will neither accept or deny from the quota cache if the policy + // cache is missed + // total_remote_quota_calls >= total_remote_quota_accepts + + // total_remote_quota_denies + // ^ Transport errors are responsible for the >= + // + + std::atomic total_quota_calls_{0}; // 1.0 + std::atomic total_quota_cache_hits_{0}; // 1.1 + std::atomic total_quota_cache_misses_{0}; // 1.1 + std::atomic total_quota_cache_hit_accepts_{0}; // 1.1 + 
std::atomic total_quota_cache_hit_denies_{0}; // 1.1 + std::atomic total_remote_quota_calls_{0}; // 1.0 + std::atomic total_remote_quota_accepts_{0}; // 1.1 + std::atomic total_remote_quota_denies_{0}; // 1.1 + std::atomic total_remote_quota_prefetch_calls_{0}; // 1.1 + + // + // Counters for upstream requests to Mixer. + // + // total_remote_calls = SUM(total_remote_call_successes, ..., + // total_remote_call_other_errors) Total transport errors would be + // (total_remote_calls - total_remote_call_successes). + // + + std::atomic total_remote_calls_{0}; // 1.1 + std::atomic total_remote_call_successes_{0}; // 1.1 + std::atomic total_remote_call_timeouts_{0}; // 1.1 + std::atomic total_remote_call_send_errors_{0}; // 1.1 + std::atomic total_remote_call_other_errors_{0}; // 1.1 + std::atomic total_remote_call_retries_{0}; // 1.1 + std::atomic total_remote_call_cancellations_{0}; // 1.1 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(MixerClientImpl); }; diff --git a/src/istio/mixerclient/client_impl_test.cc b/src/istio/mixerclient/client_impl_test.cc index ee38af82753..befcf3d48fa 100644 --- a/src/istio/mixerclient/client_impl_test.cc +++ b/src/istio/mixerclient/client_impl_test.cc @@ -19,12 +19,14 @@ #include "include/istio/mixerclient/client.h" #include "include/istio/utils/attributes_builder.h" #include "src/istio/mixerclient/status_test_util.h" +#include "src/istio/utils/logger.h" using ::google::protobuf::util::Status; using ::google::protobuf::util::error::Code; using ::istio::mixer::v1::Attributes; using ::istio::mixer::v1::CheckRequest; using ::istio::mixer::v1::CheckResponse; +using ::istio::mixerclient::CheckContextSharedPtr; using ::istio::mixerclient::CheckResponseInfo; using ::istio::quota_config::Requirement; using ::testing::_; @@ -52,10 +54,10 @@ class MockCheckTransport { class MixerClientImplTest : public ::testing::Test { public: MixerClientImplTest() { - quotas_.push_back({kRequestCount, 1}); CreateClient(true /* check_cache */, true /* quota_cache */); } + 
protected: void CreateClient(bool check_cache, bool quota_cache) { MixerClientOptions options(CheckOptions(check_cache ? 1 : 0 /*entries */), ReportOptions(1, 1000), @@ -66,8 +68,79 @@ class MixerClientImplTest : public ::testing::Test { client_ = CreateMixerClient(options); } - Attributes request_; - std::vector quotas_; + void CheckStatisticsInvariants(const Statistics& stats) { + // + // Policy check counters. + // + // total_check_calls = total_check_hits + total_check_misses + // total_check_hits = total_check_hit_accepts + total_check_hit_denies + // total_remote_check_calls = total_check_misses + // total_remote_check_calls >= total_remote_check_accepts + + // total_remote_check_denies + // ^ Transport errors are responsible for the >= + // + + EXPECT_EQ(stats.total_check_calls_, + stats.total_check_cache_hits_ + stats.total_check_cache_misses_); + EXPECT_EQ(stats.total_check_cache_hits_, + stats.total_check_cache_hit_accepts_ + + stats.total_check_cache_hit_denies_); + EXPECT_EQ(stats.total_remote_check_calls_, stats.total_check_cache_misses_); + EXPECT_GE( + stats.total_remote_check_calls_, + stats.total_remote_check_accepts_ + stats.total_remote_check_denies_); + + // + // Quota check counters + // + // total_quota_calls = total_quota_hits + total_quota_misses + // total_quota_hits = total_quota_hit_accepts + total_quota_hit_denies + // total_remote_quota_calls = total_quota_misses + + // total_remote_quota_prefetch_calls total_remote_quota_calls >= + // total_remote_quota_accepts + total_remote_quota_denies + // ^ Transport errors are responsible for the >= + // + + EXPECT_EQ(stats.total_quota_calls_, + stats.total_quota_cache_hits_ + stats.total_quota_cache_misses_); + EXPECT_EQ(stats.total_quota_cache_hits_, + stats.total_quota_cache_hit_accepts_ + + stats.total_quota_cache_hit_denies_); + EXPECT_EQ(stats.total_remote_quota_calls_, + stats.total_quota_cache_misses_ + + stats.total_remote_quota_prefetch_calls_); + EXPECT_GE( + 
stats.total_remote_quota_calls_, + stats.total_remote_quota_accepts_ + stats.total_remote_quota_denies_); + + // + // Counters for upstream requests to Mixer. + // + // total_remote_calls = SUM(total_remote_call_successes, ..., + // total_remote_call_other_errors) Total transport errors would be + // (total_remote_calls - total_remote_call_successes). + // + + EXPECT_EQ(stats.total_remote_calls_, + stats.total_remote_call_successes_ + + stats.total_remote_call_timeouts_ + + stats.total_remote_call_send_errors_ + + stats.total_remote_call_other_errors_); + } + + CheckContextSharedPtr CreateContext(int quota_request) { + uint32_t retries{0}; + bool fail_open{false}; + istio::mixerclient::SharedAttributesSharedPtr attributes{ + new SharedAttributes()}; + istio::mixerclient::CheckContextSharedPtr context{ + new CheckContext(retries, fail_open, attributes)}; + if (0 < quota_request) { + context->quotaRequirements().push_back({kRequestCount, quota_request}); + } + return context; + } + std::unique_ptr client_; MockCheckTransport mock_check_transport_; TransportCheckFunc empty_transport_; @@ -81,36 +154,53 @@ TEST_F(MixerClientImplTest, TestSuccessCheck) { on_done(Status::OK); })); - // Not to test quota - std::vector empty_quotas; - CheckResponseInfo check_response_info; - client_->Check(request_, empty_quotas, empty_transport_, - [&check_response_info](const CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_TRUE(check_response_info.response_status.ok()); + { + CheckContextSharedPtr context = CreateContext(0); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); + } - for (int i = 0; i < 10; i++) { - // Other calls should be cached. 
- CheckResponseInfo check_response_info1; - client_->Check(request_, empty_quotas, empty_transport_, - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_TRUE(check_response_info1.response_status.ok()); + for (size_t i = 0; i < 10; i++) { + CheckContextSharedPtr context = CreateContext(0); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); } Statistics stat; client_->GetStatistics(&stat); - EXPECT_EQ(stat.total_check_calls, 11); - // The first check call is a remote blocking check call. - EXPECT_EQ(stat.total_remote_check_calls, 1); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 1); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 11); + // The first check call misses the policy cache, the rest hit and are accepted + EXPECT_EQ(stat.total_check_cache_hits_, 10); + EXPECT_EQ(stat.total_check_cache_misses_, 1); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 10); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_check_calls_, 1); + EXPECT_EQ(stat.total_remote_check_accepts_, 1); + EXPECT_EQ(stat.total_remote_check_denies_, 0); // Empty quota does not trigger any quota call. 
- EXPECT_EQ(stat.total_quota_calls, 0); - EXPECT_EQ(stat.total_remote_quota_calls, 0); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 0); + EXPECT_EQ(stat.total_quota_calls_, 0); + EXPECT_EQ(stat.total_quota_cache_hits_, 0); + EXPECT_EQ(stat.total_quota_cache_misses_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 0); + EXPECT_EQ(stat.total_remote_quota_accepts_, 0); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, 0); + // Only one remote call and it succeeds + EXPECT_EQ(stat.total_remote_calls_, 1); + EXPECT_EQ(stat.total_remote_call_successes_, 1); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } TEST_F(MixerClientImplTest, TestPerRequestTransport) { @@ -126,36 +216,53 @@ TEST_F(MixerClientImplTest, TestPerRequestTransport) { on_done(Status::OK); })); - // Not to test quota - std::vector empty_quotas; - CheckResponseInfo check_response_info; - client_->Check(request_, empty_quotas, local_check_transport.GetFunc(), - [&check_response_info](const CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_TRUE(check_response_info.response_status.ok()); + { + CheckContextSharedPtr context = CreateContext(0); + Status status; + client_->Check( + context, local_check_transport.GetFunc(), + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); + } - for (int i = 0; i < 10; i++) { - // Other calls should be cached. 
- CheckResponseInfo check_response_info1; - client_->Check(request_, empty_quotas, local_check_transport.GetFunc(), - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_TRUE(check_response_info1.response_status.ok()); + for (size_t i = 0; i < 10; i++) { + CheckContextSharedPtr context = CreateContext(0); + Status status; + client_->Check( + context, local_check_transport.GetFunc(), + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); } Statistics stat; client_->GetStatistics(&stat); - EXPECT_EQ(stat.total_check_calls, 11); - // The first check call is a remote blocking check call. - EXPECT_EQ(stat.total_remote_check_calls, 1); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 1); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 11); + // The first check call misses the policy cache, the rest hit and are accepted + EXPECT_EQ(stat.total_check_cache_hits_, 10); + EXPECT_EQ(stat.total_check_cache_misses_, 1); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 10); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_check_calls_, 1); + EXPECT_EQ(stat.total_remote_check_accepts_, 1); + EXPECT_EQ(stat.total_remote_check_denies_, 0); // Empty quota does not trigger any quota call. 
- EXPECT_EQ(stat.total_quota_calls, 0); - EXPECT_EQ(stat.total_remote_quota_calls, 0); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 0); + EXPECT_EQ(stat.total_quota_calls_, 0); + EXPECT_EQ(stat.total_quota_cache_hits_, 0); + EXPECT_EQ(stat.total_quota_cache_misses_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 0); + EXPECT_EQ(stat.total_remote_quota_accepts_, 0); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, 0); + // Only one remote call and it succeeds + EXPECT_EQ(stat.total_remote_calls_, 1); + EXPECT_EQ(stat.total_remote_call_successes_, 1); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } TEST_F(MixerClientImplTest, TestNoCheckCache) { @@ -174,34 +281,57 @@ TEST_F(MixerClientImplTest, TestNoCheckCache) { on_done(Status::OK); })); - CheckResponseInfo check_response_info; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info](const CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_TRUE(check_response_info.response_status.ok()); + { + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); + } - for (int i = 0; i < 10; i++) { + for (size_t i = 0; i < 10; i++) { // Other calls are not cached. 
- CheckResponseInfo check_response_info1; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_TRUE(check_response_info1.response_status.ok()); + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); } // Call count 11 since check is not cached. EXPECT_EQ(call_counts, 11); Statistics stat; client_->GetStatistics(&stat); - // Because there is no check cache, we make remote blocking call every time. - EXPECT_EQ(stat.total_check_calls, 11); - EXPECT_EQ(stat.total_remote_check_calls, 11); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 11); - EXPECT_EQ(stat.total_quota_calls, 11); - EXPECT_EQ(stat.total_remote_quota_calls, 11); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 11); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 11); + EXPECT_EQ(stat.total_check_cache_hits_, 0); + EXPECT_EQ(stat.total_check_cache_misses_, 11); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_check_calls_, 11); + EXPECT_EQ(stat.total_remote_check_accepts_, 11); + EXPECT_EQ(stat.total_remote_check_denies_, 0); + // + // The current quota cache impl forces a cache miss whenever the check cache + // is missed. 
+ // + EXPECT_EQ(stat.total_quota_calls_, 11); + EXPECT_EQ(stat.total_quota_cache_hits_, 0); + EXPECT_EQ(stat.total_quota_cache_misses_, 11); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 11); + EXPECT_EQ(stat.total_remote_quota_accepts_, 11); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, 0); + // And all remote quota calls succeed + EXPECT_EQ(stat.total_remote_calls_, 11); + EXPECT_EQ(stat.total_remote_call_successes_, 11); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } TEST_F(MixerClientImplTest, TestNoQuotaCache) { @@ -211,43 +341,65 @@ TEST_F(MixerClientImplTest, TestNoQuotaCache) { EXPECT_CALL(mock_check_transport_, Check(_, _, _)) .WillRepeatedly(Invoke([&](const CheckRequest& request, CheckResponse* response, DoneFunc on_done) { + auto request_quotas = request.quotas(); + auto requested_amount = request_quotas[kRequestCount].amount(); response->mutable_precondition()->set_valid_use_count(1000); CheckResponse::QuotaResult quota_result; - quota_result.set_granted_amount(10); + quota_result.set_granted_amount(requested_amount); quota_result.mutable_valid_duration()->set_seconds(10); (*response->mutable_quotas())[kRequestCount] = quota_result; call_counts++; on_done(Status::OK); })); - CheckResponseInfo check_response_info; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info](const CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_TRUE(check_response_info.response_status.ok()); + { + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); + } - for (int i = 0; i 
< 10; i++) { + for (size_t i = 0; i < 10; i++) { // Other calls should be cached. - CheckResponseInfo check_response_info1; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_TRUE(check_response_info1.response_status.ok()); + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); } // Call count 11 since quota is not cached. EXPECT_EQ(call_counts, 11); Statistics stat; client_->GetStatistics(&stat); - // Because there is no quota cache, we make remote blocking call every time. - EXPECT_EQ(stat.total_check_calls, 11); - EXPECT_EQ(stat.total_remote_check_calls, 11); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 11); - EXPECT_EQ(stat.total_quota_calls, 11); - EXPECT_EQ(stat.total_remote_quota_calls, 11); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 11); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 11); + // The first check call misses the policy cache, the rest hit and are accepted + EXPECT_EQ(stat.total_check_cache_hits_, 10); + EXPECT_EQ(stat.total_check_cache_misses_, 1); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 10); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_check_calls_, 1); + EXPECT_EQ(stat.total_remote_check_accepts_, 1); + EXPECT_EQ(stat.total_remote_check_denies_, 0); + EXPECT_EQ(stat.total_quota_calls_, 11); + EXPECT_EQ(stat.total_quota_cache_hits_, 0); + EXPECT_EQ(stat.total_quota_cache_misses_, 11); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 11); + EXPECT_EQ(stat.total_remote_quota_accepts_, 11); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + 
EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, 0); + // And all remote quota calls succeed + EXPECT_EQ(stat.total_remote_calls_, 11); + EXPECT_EQ(stat.total_remote_call_successes_, 11); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } TEST_F(MixerClientImplTest, TestSuccessCheckAndQuota) { @@ -255,44 +407,75 @@ TEST_F(MixerClientImplTest, TestSuccessCheckAndQuota) { EXPECT_CALL(mock_check_transport_, Check(_, _, _)) .WillRepeatedly(Invoke([&](const CheckRequest& request, CheckResponse* response, DoneFunc on_done) { + auto request_quotas = request.quotas(); + auto requested_amount = request_quotas[kRequestCount].amount(); response->mutable_precondition()->set_valid_use_count(1000); CheckResponse::QuotaResult quota_result; - quota_result.set_granted_amount(10); + quota_result.set_granted_amount(requested_amount); quota_result.mutable_valid_duration()->set_seconds(10); (*response->mutable_quotas())[kRequestCount] = quota_result; call_counts++; on_done(Status::OK); })); - CheckResponseInfo check_response_info; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info](const CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_TRUE(check_response_info.response_status.ok()); + // quota cache starts with 1 resource. by requesting exactly 1 the request + // will be satisfied by the cache and a background request will be initiated + // to store 2 more in the cache + { + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); + } - for (int i = 0; i < 10; i++) { - // Other calls should be cached. 
- CheckResponseInfo check_response_info1; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_TRUE(check_response_info1.response_status.ok()); + // Half of the requests from now on will be satisfied by the cache but require + // background refills. + for (size_t i = 0; i < 100; i++) { + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_TRUE(status.ok()); } - // Call count should be less than 4 - EXPECT_LE(call_counts, 3); + + // The number of remote prefetch calls should be less than or equal to the + // current prefetch impl's value of 6. Decreases are of course acceptable, + // but increases should be allowed only with a good reason. + int expected_prefetchs = 6; + + EXPECT_EQ(call_counts, 1 + expected_prefetchs); Statistics stat; client_->GetStatistics(&stat); - // Less than 4 remote calls are made for prefetching, and they are - // non-blocking remote calls. 
- EXPECT_EQ(stat.total_check_calls, 11); - EXPECT_LE(stat.total_remote_check_calls, 3); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 1); - EXPECT_EQ(stat.total_quota_calls, 11); - EXPECT_LE(stat.total_remote_quota_calls, 3); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 1); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 101); + // The first check call misses the policy cache, the rest hit and are accepted + EXPECT_EQ(stat.total_check_cache_hits_, 100); + EXPECT_EQ(stat.total_check_cache_misses_, 1); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 100); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_check_calls_, 1); + EXPECT_EQ(stat.total_remote_check_accepts_, 1); + EXPECT_EQ(stat.total_remote_check_denies_, 0); + // Quota cache is always hit because of the quota prefetch mechanism. + EXPECT_EQ(stat.total_quota_calls_, 101); + EXPECT_EQ(stat.total_quota_cache_hits_, 100); + EXPECT_EQ(stat.total_quota_cache_misses_, 1); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 100); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 1 + expected_prefetchs); + EXPECT_EQ(stat.total_remote_quota_accepts_, 1 + expected_prefetchs); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, expected_prefetchs); + // And all remote quota calls succeed + EXPECT_EQ(stat.total_remote_calls_, 1 + expected_prefetchs); + EXPECT_EQ(stat.total_remote_call_successes_, 1 + expected_prefetchs); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } TEST_F(MixerClientImplTest, TestFailedCheckAndQuota) { @@ -309,35 +492,54 @@ TEST_F(MixerClientImplTest, TestFailedCheckAndQuota) { on_done(Status::OK); })); - CheckResponseInfo check_response_info; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info](const 
CheckResponseInfo& info) { - check_response_info.response_status = info.response_status; - }); - EXPECT_ERROR_CODE(Code::FAILED_PRECONDITION, - check_response_info.response_status); + { + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_ERROR_CODE(Code::FAILED_PRECONDITION, status); + } - for (int i = 0; i < 10; i++) { + for (size_t i = 0; i < 10; i++) { // Other calls should be cached. - CheckResponseInfo check_response_info1; - client_->Check(request_, quotas_, empty_transport_, - [&check_response_info1](const CheckResponseInfo& info) { - check_response_info1.response_status = - info.response_status; - }); - EXPECT_ERROR_CODE(Code::FAILED_PRECONDITION, - check_response_info1.response_status); + CheckContextSharedPtr context = CreateContext(1); + Status status; + client_->Check( + context, empty_transport_, + [&status](const CheckResponseInfo& info) { status = info.status(); }); + EXPECT_ERROR_CODE(Code::FAILED_PRECONDITION, status); } Statistics stat; client_->GetStatistics(&stat); + CheckStatisticsInvariants(stat); + + EXPECT_EQ(stat.total_check_calls_, 11); // The first call is a remote blocking call, which returns failed precondition // in check response. Following calls only make check cache calls and return. 
- EXPECT_EQ(stat.total_check_calls, 11); - EXPECT_EQ(stat.total_remote_check_calls, 1); - EXPECT_EQ(stat.total_blocking_remote_check_calls, 1); - EXPECT_EQ(stat.total_quota_calls, 1); - EXPECT_EQ(stat.total_remote_quota_calls, 1); - EXPECT_EQ(stat.total_blocking_remote_quota_calls, 1); + EXPECT_EQ(stat.total_check_cache_hits_, 10); + EXPECT_EQ(stat.total_check_cache_misses_, 1); + EXPECT_EQ(stat.total_check_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_check_cache_hit_denies_, 10); + EXPECT_EQ(stat.total_remote_check_calls_, 1); + EXPECT_EQ(stat.total_remote_check_accepts_, 0); + EXPECT_EQ(stat.total_remote_check_denies_, 1); + // If the check cache denies the request, the quota cache never sees it. + EXPECT_EQ(stat.total_quota_calls_, 1); + EXPECT_EQ(stat.total_quota_cache_hits_, 0); + EXPECT_EQ(stat.total_quota_cache_misses_, 1); + EXPECT_EQ(stat.total_quota_cache_hit_accepts_, 0); + EXPECT_EQ(stat.total_quota_cache_hit_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_calls_, 1); + EXPECT_EQ(stat.total_remote_quota_accepts_, 1); + EXPECT_EQ(stat.total_remote_quota_denies_, 0); + EXPECT_EQ(stat.total_remote_quota_prefetch_calls_, 0); + // Only one remote call and it succeeds at the transport level + EXPECT_EQ(stat.total_remote_calls_, 1); + EXPECT_EQ(stat.total_remote_call_successes_, 1); + EXPECT_EQ(stat.total_remote_call_timeouts_, 0); + EXPECT_EQ(stat.total_remote_call_send_errors_, 0); + EXPECT_EQ(stat.total_remote_call_other_errors_, 0); } } // namespace diff --git a/src/istio/mixerclient/quota_cache.cc b/src/istio/mixerclient/quota_cache.cc index 4d77a038fff..4cb6f904494 100644 --- a/src/istio/mixerclient/quota_cache.cc +++ b/src/istio/mixerclient/quota_cache.cc @@ -15,6 +15,7 @@ #include "src/istio/mixerclient/quota_cache.h" #include "include/istio/utils/protobuf.h" +#include "src/istio/utils/logger.h" using namespace std::chrono; using ::google::protobuf::util::Status; @@ -96,6 +97,7 @@ bool QuotaCache::CheckResult::BuildRequest(CheckRequest* request) { } 
} if (!rejected_quota_names.empty()) { + MIXER_DEBUG("Quota is exhausted for: %s", rejected_quota_names.c_str()); status_ = Status(Code::RESOURCE_EXHAUSTED, std::string("Quota is exhausted for: ") + rejected_quota_names); @@ -118,8 +120,8 @@ void QuotaCache::CheckResult::SetResponse(const Status& status, if (it != quotas.end()) { result = &it->second; } else { - GOOGLE_LOG(ERROR) - << "Quota response did not have quota for: " << quota.name; + MIXER_WARN("Quota response did not have quota for: %s", + quota.name.c_str()); } } if (!quota.response_func(attributes, result)) { @@ -131,6 +133,7 @@ void QuotaCache::CheckResult::SetResponse(const Status& status, } } if (!rejected_quota_names.empty()) { + MIXER_DEBUG("Quota is exhausted for: %s", rejected_quota_names.c_str()); status_ = Status(Code::RESOURCE_EXHAUSTED, std::string("Quota is exhausted for: ") + rejected_quota_names); @@ -218,9 +221,10 @@ void QuotaCache::SetResponse(const Attributes& attributes, utils::HashType signature; if (!referenced.Signature(attributes, quota_name, &signature)) { - GOOGLE_LOG(ERROR) << "Quota response referenced mismatchs with request"; - GOOGLE_LOG(ERROR) << "Request attributes: " << attributes.DebugString(); - GOOGLE_LOG(ERROR) << "Referenced attributes: " << referenced.DebugString(); + MIXER_WARN( + "Quota response referenced does not match request. 
Request " + "attributes: %s, Referenced attributes: %s", + attributes.DebugString().c_str(), referenced.DebugString().c_str()); return; } @@ -235,8 +239,8 @@ void QuotaCache::SetResponse(const Attributes& attributes, utils::HashType hash = referenced.Hash(); if (quota_ref.referenced_map.find(hash) == quota_ref.referenced_map.end()) { quota_ref.referenced_map[hash] = referenced; - GOOGLE_LOG(INFO) << "Add a new Referenced for quota cache: " << quota_name - << ", reference: " << referenced.DebugString(); + MIXER_DEBUG("Add a new Referenced for quota cache: %s, reference: %s", + quota_name.c_str(), referenced.DebugString().c_str()); } cache_->Insert(signature, quota_ref.pending_item.release(), 1); diff --git a/src/istio/mixerclient/quota_cache.h b/src/istio/mixerclient/quota_cache.h index 231e4324795..a667b9b089a 100644 --- a/src/istio/mixerclient/quota_cache.h +++ b/src/istio/mixerclient/quota_cache.h @@ -22,8 +22,10 @@ #include #include -#include "include/istio/mixerclient/client.h" +#include "google/protobuf/stubs/status.h" +#include "include/istio/mixerclient/options.h" #include "include/istio/prefetch/quota_prefetch.h" +#include "include/istio/quota_config/requirement.h" #include "include/istio/utils/simple_lru_cache.h" #include "include/istio/utils/simple_lru_cache_inl.h" #include "src/istio/mixerclient/referenced.h" @@ -57,7 +59,7 @@ class QuotaCache { bool IsCacheHit() const; - ::google::protobuf::util::Status status() const { return status_; } + const ::google::protobuf::util::Status& status() const { return status_; } void SetResponse(const ::google::protobuf::util::Status& status, const ::istio::mixer::v1::Attributes& attributes, diff --git a/src/istio/mixerclient/report_batch.cc b/src/istio/mixerclient/report_batch.cc index 5bb07ef436f..7d344089eec 100644 --- a/src/istio/mixerclient/report_batch.cc +++ b/src/istio/mixerclient/report_batch.cc @@ -15,6 +15,8 @@ #include "src/istio/mixerclient/report_batch.h" #include "include/istio/utils/protobuf.h" 
+#include "src/istio/mixerclient/status_util.h" +#include "src/istio/utils/logger.h" using ::google::protobuf::util::Status; using ::google::protobuf::util::error::Code; @@ -25,6 +27,9 @@ using ::istio::mixer::v1::ReportResponse; namespace istio { namespace mixerclient { +static std::atomic REPORT_FAIL_LOG_MESSAGES{0}; +static constexpr uint32_t REPORT_FAIL_LOG_MODULUS{100}; + ReportBatch::ReportBatch(const ReportOptions& options, TransportReportFunc transport, TimerCreateFunc timer_create, @@ -39,10 +44,11 @@ ReportBatch::ReportBatch(const ReportOptions& options, ReportBatch::~ReportBatch() { Flush(); } -void ReportBatch::Report(const Attributes& request) { +void ReportBatch::Report( + const istio::mixerclient::SharedAttributesSharedPtr& attributes) { std::lock_guard lock(mutex_); ++total_report_calls_; - batch_compressor_->Add(request); + batch_compressor_->Add(*attributes->attributes()); if (batch_compressor_->size() >= options_.max_batch_entries) { FlushWithLock(); } else { @@ -66,11 +72,41 @@ void ReportBatch::FlushWithLock() { ++total_remote_report_calls_; auto request = batch_compressor_->Finish(); - ReportResponse* response = new ReportResponse; - transport_(request, response, [this, response](const Status& status) { - delete response; + std::shared_ptr response{new ReportResponse()}; + + // TODO(jblatt) I replaced a ReportResponse raw pointer with a shared + // pointer so at least the memory will be freed if this lambda is deleted + // without being called, but really this should be a unique_ptr that is + // moved into the transport_ and then moved into the lambda if invoked. 
+ transport_(request, &*response, [this, response](const Status& status) { + // + // Classify and track transport errors + // + + TransportResult result = TransportStatus(status); + + switch (result) { + case TransportResult::SUCCESS: + ++total_remote_report_successes_; + break; + case TransportResult::RESPONSE_TIMEOUT: + ++total_remote_report_timeouts_; + break; + case TransportResult::SEND_ERROR: + ++total_remote_report_send_errors_; + break; + case TransportResult::OTHER: + ++total_remote_report_other_errors_; + break; + } + if (!status.ok()) { - GOOGLE_LOG(ERROR) << "Mixer Report failed with: " << status.ToString(); + if (MIXER_WARN_ENABLED && + 0 == REPORT_FAIL_LOG_MESSAGES++ % REPORT_FAIL_LOG_MODULUS) { + MIXER_WARN("Mixer Report failed with: %s", status.ToString().c_str()); + } else { + MIXER_DEBUG("Mixer Report failed with: %s", status.ToString().c_str()); + } if (utils::InvalidDictionaryStatus(status)) { compressor_.ShrinkGlobalDictionary(); } diff --git a/src/istio/mixerclient/report_batch.h b/src/istio/mixerclient/report_batch.h index 156129ee00a..3ce9aaca004 100644 --- a/src/istio/mixerclient/report_batch.h +++ b/src/istio/mixerclient/report_batch.h @@ -34,7 +34,7 @@ class ReportBatch { virtual ~ReportBatch(); // Make batched report call. - void Report(const ::istio::mixer::v1::Attributes& request); + void Report(const istio::mixerclient::SharedAttributesSharedPtr& attributes); // Flush out batched reports. 
void Flush(); @@ -44,6 +44,22 @@ class ReportBatch { return total_remote_report_calls_; } + uint64_t total_remote_report_successes() const { + return total_remote_report_successes_; + } + + uint64_t total_remote_report_timeouts() const { + return total_remote_report_timeouts_; + } + + uint64_t total_remote_report_send_errors() const { + return total_remote_report_send_errors_; + } + + uint64_t total_remote_report_other_errors() const { + return total_remote_report_other_errors_; + } + private: void FlushWithLock(); @@ -68,8 +84,12 @@ class ReportBatch { // batched report compressor std::unique_ptr batch_compressor_; - std::atomic_int_fast64_t total_report_calls_; - std::atomic_int_fast64_t total_remote_report_calls_; + std::atomic total_report_calls_{0}; // 1.0 + std::atomic total_remote_report_calls_{0}; // 1.0 + std::atomic total_remote_report_successes_{0}; // 1.1 + std::atomic total_remote_report_timeouts_{0}; // 1.1 + std::atomic total_remote_report_send_errors_{0}; // 1.1 + std::atomic total_remote_report_other_errors_{0}; // 1.1 GOOGLE_DISALLOW_EVIL_CONSTRUCTORS(ReportBatch); }; diff --git a/src/istio/mixerclient/report_batch_test.cc b/src/istio/mixerclient/report_batch_test.cc index 06cc7156ded..59e2a917e8e 100644 --- a/src/istio/mixerclient/report_batch_test.cc +++ b/src/istio/mixerclient/report_batch_test.cc @@ -83,7 +83,8 @@ TEST_F(ReportBatchTest, TestBatchDisabled) { Invoke([](const ReportRequest& request, ReportResponse* response, DoneFunc on_done) { on_done(Status::OK); })); - Attributes report; + istio::mixerclient::SharedAttributesSharedPtr report{ + new istio::mixerclient::SharedAttributes()}; batch_->Report(report); } @@ -96,7 +97,8 @@ TEST_F(ReportBatchTest, TestBatchReport) { on_done(Status::OK); })); - Attributes report; + istio::mixerclient::SharedAttributesSharedPtr report{ + new istio::mixerclient::SharedAttributes()}; for (int i = 0; i < 10; ++i) { batch_->Report(report); } @@ -115,7 +117,8 @@ TEST_F(ReportBatchTest, 
TestBatchReportWithTimeout) { on_done(Status::OK); })); - Attributes report; + istio::mixerclient::SharedAttributesSharedPtr report{ + new istio::mixerclient::SharedAttributes()}; batch_->Report(report); EXPECT_EQ(report_call_count, 0); diff --git a/src/istio/mixerclient/shared_attributes.h b/src/istio/mixerclient/shared_attributes.h new file mode 100644 index 00000000000..418c1fabc22 --- /dev/null +++ b/src/istio/mixerclient/shared_attributes.h @@ -0,0 +1,49 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "google/protobuf/arena.h" +#include "mixer/v1/attributes.pb.h" + +namespace istio { +namespace mixerclient { + +/** + * Attributes shared by the policy/quota check requests and telemetry requests + * sent to the Mixer server. 
+ */ +class SharedAttributes { + public: + SharedAttributes() + : attributes_(google::protobuf::Arena::CreateMessage< + ::istio::mixer::v1::Attributes>(&arena_)) {} + + const ::istio::mixer::v1::Attributes* attributes() const { + return attributes_; + } + ::istio::mixer::v1::Attributes* attributes() { return attributes_; } + + google::protobuf::Arena& arena() { return arena_; } + + private: + google::protobuf::Arena arena_; + ::istio::mixer::v1::Attributes* attributes_; +}; + +typedef std::shared_ptr SharedAttributesSharedPtr; + +} // namespace mixerclient +} // namespace istio diff --git a/src/istio/mixerclient/status_util.cc b/src/istio/mixerclient/status_util.cc new file mode 100644 index 00000000000..01ab0e5c517 --- /dev/null +++ b/src/istio/mixerclient/status_util.cc @@ -0,0 +1,49 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/istio/mixerclient/status_util.h" +#include "absl/strings/match.h" +#include "absl/strings/string_view.h" + +namespace istio { +namespace mixerclient { + +static constexpr absl::string_view TIMEOUT_MESSAGE{"upstream request timeout"}; +static constexpr absl::string_view SEND_ERROR_MESSAGE{ + "upstream connect error or disconnect/reset before headers"}; + +TransportResult TransportStatus( + const ::google::protobuf::util::Status &status) { + if (status.ok()) { + return TransportResult::SUCCESS; + } + + if (::google::protobuf::util::error::Code::UNAVAILABLE == + status.error_code()) { + absl::string_view error_message{status.error_message().data(), + static_cast( + status.error_message().length())}; + if (absl::StartsWith(error_message, TIMEOUT_MESSAGE)) { + return TransportResult::RESPONSE_TIMEOUT; + } + if (absl::StartsWith(error_message, SEND_ERROR_MESSAGE)) { + return TransportResult::SEND_ERROR; + } + } + + return TransportResult::OTHER; +} +} // namespace mixerclient +} // namespace istio diff --git a/src/istio/mixerclient/status_util.h b/src/istio/mixerclient/status_util.h new file mode 100644 index 00000000000..843d2745764 --- /dev/null +++ b/src/istio/mixerclient/status_util.h @@ -0,0 +1,35 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include "google/protobuf/stubs/status.h" + +namespace istio { +namespace mixerclient { + +enum class TransportResult { + SUCCESS, // Response received + SEND_ERROR, // Cannot connect to peer or send request to peer. + RESPONSE_TIMEOUT, // Connected to peer and sent request, but didn't receive a + // response in time. + OTHER // Something else went wrong +}; + +extern TransportResult TransportStatus( + const ::google::protobuf::util::Status &status); + +} // namespace mixerclient +} // namespace istio diff --git a/src/istio/prefetch/BUILD b/src/istio/prefetch/BUILD index ded66da7c2e..56d6ecef739 100644 --- a/src/istio/prefetch/BUILD +++ b/src/istio/prefetch/BUILD @@ -25,6 +25,7 @@ cc_library( visibility = ["//visibility:public"], deps = [ "//include/istio/prefetch:headers_lib", + "//src/istio/utils:utils_lib", ], ) diff --git a/src/istio/prefetch/quota_prefetch.cc b/src/istio/prefetch/quota_prefetch.cc index ec244d2bc72..62ae5642658 100644 --- a/src/istio/prefetch/quota_prefetch.cc +++ b/src/istio/prefetch/quota_prefetch.cc @@ -16,28 +16,12 @@ #include "include/istio/prefetch/quota_prefetch.h" #include "src/istio/prefetch/circular_queue.h" #include "src/istio/prefetch/time_based_counter.h" +#include "src/istio/utils/logger.h" #include using namespace std::chrono; -// Turn this on to debug for quota_prefetch_test.cc -// Not for debugging in production. -#if 0 -#include -#define LOG(t) \ - std::cerr << "(" \ - << duration_cast(t.time_since_epoch()).count() \ - << "):" -#else -// Pipe to stringstream to disable logging. 
-#include -std::ostringstream os; -#define LOG(t) \ - os.clear(); \ - os -#endif - namespace istio { namespace prefetch { namespace { @@ -168,6 +152,10 @@ void QuotaPrefetchImpl::AttemptPrefetch(int amount, Tick t) { int avail = CountAvailable(t); int pass_count = counter_.Count(t); int desired = std::max(pass_count, options_.min_prefetch_amount); + MIXER_TRACE( + "Prefetch decision: available=%d, desired=%d, inflight_count=%d, " + "requested=%d", + avail, desired, inflight_count_, amount); if ((avail < desired / 2 && inflight_count_ == 0) || avail < amount) { bool use_not_granted = (avail == 0 && mode_ == OPEN); Prefetch(std::max(amount, desired), use_not_granted, t); @@ -181,7 +169,7 @@ void QuotaPrefetchImpl::Prefetch(int req_amount, bool use_not_granted, Tick t) { slot_id = Add(req_amount, t + milliseconds(kMaxExpirationInMs)); } - LOG(t) << "Prefetch: " << req_amount << ", id: " << slot_id << std::endl; + MIXER_DEBUG("Prefetch amount %d for slotid: %lu", req_amount, slot_id); last_prefetch_time_ = t; ++inflight_count_; @@ -225,7 +213,7 @@ int QuotaPrefetchImpl::Substract(int delta, Tick t) { } } else { if (n->available > 0) { - LOG(t) << "Expired:" << n->available << std::endl; + MIXER_DEBUG("Expired: %d", n->available); } } queue_.Pop(); @@ -240,9 +228,8 @@ void QuotaPrefetchImpl::OnResponse(SlotId slot_id, int req_amount, std::lock_guard lock(mutex_); --inflight_count_; - LOG(t) << "OnResponse: req:" << req_amount << ", resp: " << resp_amount - << ", expire: " << expiration.count() << ", id: " << slot_id - << std::endl; + MIXER_DEBUG("OnResponse: req: %d, resp: %d, expire: %ld, id: %lu", req_amount, + resp_amount, expiration.count(), slot_id); // resp_amount of -1 indicates any network failures. // Use fail open policy to handle any netowrk failures. 
@@ -301,7 +288,7 @@ bool QuotaPrefetchImpl::Check(int amount, Tick t) { } } if (!ret) { - LOG(t) << "Rejected amount: " << amount << std::endl; + MIXER_DEBUG("Rejected amount: %d", amount); } return ret; } diff --git a/src/istio/utils/BUILD b/src/istio/utils/BUILD index 3efb4634c0e..1eb3b2f6d4c 100644 --- a/src/istio/utils/BUILD +++ b/src/istio/utils/BUILD @@ -18,11 +18,13 @@ cc_library( name = "utils_lib", srcs = [ "local_attributes.cc", + "logger.cc", "protobuf.cc", "status.cc", "utils.cc", ], hdrs = [ + "logger.h", "utils.h", ], visibility = ["//visibility:public"], @@ -60,6 +62,16 @@ cc_test( ], ) +cc_test( + name = "logger_test", + size = "small", + srcs = ["logger_test.cc"], + deps = [ + ":utils_lib", + "//external:googletest_main", + ], +) + cc_library( name = "attribute_names_lib", srcs = [ diff --git a/src/istio/utils/attribute_names.cc b/src/istio/utils/attribute_names.cc index 392bc1c45ef..31c3fc9c542 100644 --- a/src/istio/utils/attribute_names.cc +++ b/src/istio/utils/attribute_names.cc @@ -25,6 +25,14 @@ const char AttributeName::kSourceNamespace[] = "source.namespace"; const char AttributeName::kSourceUID[] = "source.uid"; const char AttributeName::kDestinationPrincipal[] = "destination.principal"; +const char AttributeName::kDestinationServiceName[] = + "destination.service.name"; +const char AttributeName::kDestinationServiceUID[] = "destination.service.uid"; +const char AttributeName::kDestinationServiceHost[] = + "destination.service.host"; +const char AttributeName::kDestinationServiceNamespace[] = + "destination.service.namespace"; + const char AttributeName::kRequestHeaders[] = "request.headers"; const char AttributeName::kRequestHost[] = "request.host"; const char AttributeName::kRequestMethod[] = "request.method"; diff --git a/src/istio/utils/logger.cc b/src/istio/utils/logger.cc new file mode 100644 index 00000000000..f5b32c6981e --- /dev/null +++ b/src/istio/utils/logger.cc @@ -0,0 +1,87 @@ +/* Copyright 2019 Istio Authors. 
All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "src/istio/utils/logger.h"
+#include 
+
+namespace istio {
+namespace utils {
+
+Logger::~Logger() {}
+
+void Logger::log(Level level, const char *format, ...) {
+  if (!isLoggable(level)) {
+    return;
+  }
+
+  va_list args;
+  va_start(args, format);
+  char buffer[256];
+  ::vsnprintf(buffer, sizeof(buffer), format, args);
+  buffer[sizeof(buffer) - 1] = 0;
+  va_end(args);
+
+  writeBuffer(level, buffer);
+}
+
+// This is equivalent to the original mixer client logger, but is not used when
+// mixer client is used inside Envoy. This preserves mixer client's
+// independence of the Envoy source code without forcing it to log (infrequently)
+// to stdout.
+class DefaultLogger : public Logger { + protected: + virtual bool isLoggable(Level level) override { + switch (level) { + case Level::TRACE_: + case Level::DEBUG_: + return false; + case Level::INFO_: + case Level::WARN_: + case Level::ERROR_: + return true; + } + } + + virtual void writeBuffer(Level level, const char *buffer) override { + fprintf(stderr, "%s %s\n", levelString(level), buffer); + } + + private: + const char *levelString(Level level) { + switch (level) { + case Level::TRACE_: + return "TRACE"; + case Level::DEBUG_: + return "DEBUG"; + case Level::INFO_: + return "INFO"; + case Level::WARN_: + return "WARN"; + case Level::ERROR_: + return "ERROR"; + } + } +}; + +static std::unique_ptr active_logger{new DefaultLogger()}; + +void setLogger(std::unique_ptr logger) { + active_logger = std::move(logger); + MIXER_INFO("Logger active"); +} +Logger &getLogger() { return *active_logger; } + +} // namespace utils +} // namespace istio diff --git a/src/istio/utils/logger.h b/src/istio/utils/logger.h new file mode 100644 index 00000000000..b06baa30622 --- /dev/null +++ b/src/istio/utils/logger.h @@ -0,0 +1,107 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#pragma once + +#include + +namespace istio { +namespace utils { + +class Logger { + public: + virtual ~Logger(); + + enum class Level { TRACE_, DEBUG_, INFO_, WARN_, ERROR_ }; + + void log(Level level, const char *format, ...); + + virtual bool isLoggable(Level level) = 0; + + protected: + virtual void writeBuffer(Level level, const char *buffer) = 0; +}; + +extern void setLogger(std::unique_ptr logger); +extern Logger &getLogger(); + +} // namespace utils +} // namespace istio + +#define STRINGLIT2(x) #x +#define STRINGLIT(x) STRINGLIT2(x) +#define FILE_LINE "[" __FILE__ ":" STRINGLIT(__LINE__) "] " + +#define MIXER_TRACE_ENABLED \ + (istio::utils::getLogger().isLoggable(istio::utils::Logger::Level::TRACE_)) +#define MIXER_DEBUG_ENABLED \ + (istio::utils::getLogger().isLoggable(istio::utils::Logger::Level::DEBUG_)) +#define MIXER_INFO_ENABLED \ + (istio::utils::getLogger().isLoggable(istio::utils::Logger::Level::INFO_)) +#define MIXER_WARN_ENABLED \ + (istio::utils::getLogger().isLoggable(istio::utils::Logger::Level::WARN_)) +#define MIXER_ERROR_ENABLED \ + (istio::utils::getLogger().isLoggable(istio::utils::Logger::Level::ERROR_)) + +#define MIXER_TRACE_INT(FORMAT, ...) \ + istio::utils::getLogger().log(istio::utils::Logger::Level::TRACE_, \ + FILE_LINE FORMAT, ##__VA_ARGS__) +#define MIXER_DEBUG_INT(FORMAT, ...) \ + istio::utils::getLogger().log(istio::utils::Logger::Level::DEBUG_, \ + FILE_LINE FORMAT, ##__VA_ARGS__) +#define MIXER_INFO_INT(FORMAT, ...) \ + istio::utils::getLogger().log(istio::utils::Logger::Level::INFO_, \ + FILE_LINE FORMAT, ##__VA_ARGS__) +#define MIXER_WARN_INT(FORMAT, ...) \ + istio::utils::getLogger().log(istio::utils::Logger::Level::WARN_, \ + FILE_LINE FORMAT, ##__VA_ARGS__) +#define MIXER_ERROR_INT(FORMAT, ...) \ + istio::utils::getLogger().log(istio::utils::Logger::Level::ERROR_, \ + FILE_LINE FORMAT, ##__VA_ARGS__) + +#define MIXER_TRACE(FORMAT, ...) 
\ + do { \ + if (MIXER_TRACE_ENABLED) { \ + MIXER_TRACE_INT(FORMAT, ##__VA_ARGS__); \ + } \ + } while (0) + +#define MIXER_DEBUG(FORMAT, ...) \ + do { \ + if (MIXER_DEBUG_ENABLED) { \ + MIXER_DEBUG_INT(FORMAT, ##__VA_ARGS__); \ + } \ + } while (0) + +#define MIXER_INFO(FORMAT, ...) \ + do { \ + if (MIXER_INFO_ENABLED) { \ + MIXER_INFO_INT(FORMAT, ##__VA_ARGS__); \ + } \ + } while (0) + +#define MIXER_WARN(FORMAT, ...) \ + do { \ + if (MIXER_WARN_ENABLED) { \ + MIXER_WARN_INT(FORMAT, ##__VA_ARGS__); \ + } \ + } while (0) + +#define MIXER_ERROR(FORMAT, ...) \ + do { \ + if (MIXER_ERROR_ENABLED) { \ + MIXER_ERROR_INT(FORMAT, ##__VA_ARGS__); \ + } \ + } while (0) diff --git a/src/istio/utils/logger_test.cc b/src/istio/utils/logger_test.cc new file mode 100644 index 00000000000..a03522e578b --- /dev/null +++ b/src/istio/utils/logger_test.cc @@ -0,0 +1,133 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "src/istio/utils/logger.h" +#include "gtest/gtest.h" + +#include + +namespace istio { +namespace utils { + +class CountingArgument { + public: + const char* c_str() { + ++to_string_calls; + return "logged entity"; + } + + int to_string_calls{0}; +}; + +class CountingLogger : public Logger { + public: + CountingLogger(int& is_loggable_calls, int& write_buffer_calls) + : is_loggable_calls_(is_loggable_calls), + write_buffer_calls_(write_buffer_calls) {} + + virtual bool isLoggable(Level level) override { + ++is_loggable_calls_; + + switch (level) { + case Level::TRACE_: + case Level::DEBUG_: + return false; + case Level::INFO_: + case Level::WARN_: + case Level::ERROR_: + return true; + } + } + + virtual void writeBuffer(Level level, const char* buffer) override { + ++write_buffer_calls_; + } + + private: + int& is_loggable_calls_; + int& write_buffer_calls_; +}; + +class LoggerTest : public ::testing::Test { + protected: + virtual void SetUp() override { + std::unique_ptr logger{ + new CountingLogger(is_loggable_calls_, write_buffer_calls_)}; + setLogger(std::move(logger)); + // Set logger itself logs something, so clear the counters + is_loggable_calls_ = 0; + write_buffer_calls_ = 0; + } + + int is_loggable_calls_{0}; + int write_buffer_calls_{0}; +}; + +TEST_F(LoggerTest, CallArgsOnlyIfLoggable) { + CountingArgument entity; + int expected_to_string_calls = 0; + int expected_is_loggable_calls = 0; + int expected_write_buffer_calls = 0; + + // TRACE and DEBUG shouldn't be logged and shouldn't have any affect on the + // arguments to be logged. 
+ + MIXER_TRACE("%s", entity.c_str()); + ++expected_is_loggable_calls; + + EXPECT_EQ(expected_to_string_calls, entity.to_string_calls); + EXPECT_EQ(expected_is_loggable_calls, is_loggable_calls_); + EXPECT_EQ(expected_write_buffer_calls, write_buffer_calls_); + + MIXER_DEBUG("%s", entity.c_str()); + ++expected_is_loggable_calls; + + EXPECT_EQ(expected_to_string_calls, entity.to_string_calls); + EXPECT_EQ(expected_is_loggable_calls, is_loggable_calls_); + EXPECT_EQ(expected_write_buffer_calls, write_buffer_calls_); + + // INFO+ will invoke their arguments once, be logged, and call isLoggable + // twice due to a redundant/defensive isLoggable check. + + MIXER_INFO("%s", entity.c_str()); + expected_is_loggable_calls += 2; + ++expected_to_string_calls; + ++expected_write_buffer_calls; + + EXPECT_EQ(expected_to_string_calls, entity.to_string_calls); + EXPECT_EQ(expected_is_loggable_calls, is_loggable_calls_); + EXPECT_EQ(expected_write_buffer_calls, write_buffer_calls_); + + MIXER_WARN("%s", entity.c_str()); + expected_is_loggable_calls += 2; + ++expected_to_string_calls; + ++expected_write_buffer_calls; + + EXPECT_EQ(expected_to_string_calls, entity.to_string_calls); + EXPECT_EQ(expected_is_loggable_calls, is_loggable_calls_); + EXPECT_EQ(expected_write_buffer_calls, write_buffer_calls_); + + MIXER_ERROR("%s", entity.c_str()); + expected_is_loggable_calls += 2; + ++expected_to_string_calls; + ++expected_write_buffer_calls; + + EXPECT_EQ(expected_to_string_calls, entity.to_string_calls); + EXPECT_EQ(expected_is_loggable_calls, is_loggable_calls_); + EXPECT_EQ(expected_write_buffer_calls, write_buffer_calls_); +} + +} // namespace utils +} // namespace istio diff --git a/test/integration/BUILD b/test/integration/BUILD index b0acfbd303e..af70546752d 100644 --- a/test/integration/BUILD +++ b/test/integration/BUILD @@ -19,11 +19,31 @@ package(default_visibility = ["//visibility:public"]) load( "@envoy//bazel:envoy_build_system.bzl", "envoy_cc_test", + 
"envoy_cc_test_library", +) + +envoy_cc_test_library( + name = "int_client_server", + srcs = [ + "int_client.cc", + "int_server.cc", + ], + hdrs = [ + "int_client.h", + "int_server.h", + ], + repository = "@envoy", + deps = [ + "@envoy//source/server:server_lib", + "@envoy//test/integration:http_protocol_integration_lib", + ], ) envoy_cc_test( name = "istio_http_integration_test", - srcs = ["istio_http_integration_test.cc"], + srcs = [ + "istio_http_integration_test.cc", + ], repository = "@envoy", deps = [ "//include/istio/utils:attribute_names_header", @@ -37,6 +57,31 @@ envoy_cc_test( ], ) +envoy_cc_test( + name = "mixer_fault_test", + srcs = [ + "mixer_fault_test.cc", + ], + repository = "@envoy", + deps = [ + ":int_client_server", + "//include/istio/utils:attribute_names_header", + "//src/envoy/http/mixer:filter_lib", + "//src/envoy/utils:filter_names_lib", + ], +) + +envoy_cc_test( + name = "int_client_server_test", + srcs = [ + "int_client_server_test.cc", + ], + repository = "@envoy", + deps = [ + ":int_client_server", + ], +) + envoy_cc_test( name = "exchanged_token_integration_test", srcs = ["exchanged_token_integration_test.cc"], diff --git a/test/integration/int_client.cc b/test/integration/int_client.cc new file mode 100644 index 00000000000..dd174ba2914 --- /dev/null +++ b/test/integration/int_client.cc @@ -0,0 +1,678 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "int_client.h" + +#include +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" +#include "common/stats/isolated_store_impl.h" +#include "envoy/thread/thread.h" + +namespace Mixer { +namespace Integration { + +class ClientStream : public Envoy::Http::StreamDecoder, + public Envoy::Http::StreamCallbacks, + Envoy::Logger::Loggable { + public: + ClientStream(uint32_t id, ClientConnection &connection, + ClientResponseCallback callback) + : id_(id), connection_(connection), callback_(callback) {} + + virtual ~ClientStream() { + ENVOY_LOG(trace, "ClientStream({}:{}:{}) destroyed", connection_.name(), + connection_.id(), id_); + } + + // + // Envoy::Http::StreamDecoder + // + + virtual void decode100ContinueHeaders(Envoy::Http::HeaderMapPtr &&) override { + ENVOY_LOG(trace, "ClientStream({}:{}:{}) got continue headers", + connection_.name(), connection_.id(), id_); + } + + virtual void decodeHeaders(Envoy::Http::HeaderMapPtr &&response_headers, + bool end_stream) override { + ENVOY_LOG(debug, "ClientStream({}:{}:{}) got response headers", + connection_.name(), connection_.id(), id_); + + response_headers_ = std::move(response_headers); + + if (end_stream) { + onEndStream(); + // stream is now destroyed + } + } + + virtual void decodeData(Envoy::Buffer::Instance &, bool end_stream) override { + ENVOY_LOG(debug, "ClientStream({}:{}:{}) got response body data", + connection_.name(), connection_.id(), id_); + + if (end_stream) { + onEndStream(); + // stream is now destroyed + } + } + + virtual void decodeTrailers(Envoy::Http::HeaderMapPtr &&) override { + ENVOY_LOG(trace, "ClientStream({}:{}:{}) got response trailers", + connection_.name(), connection_.id(), id_); + onEndStream(); + // stream is now destroyed + } + + virtual void decodeMetadata(Envoy::Http::MetadataMapPtr &&) override { + ENVOY_LOG(trace, "ClientStream({}:{}):{} got metadata", connection_.name(), + connection_.id(), id_); + } + + // + // 
Envoy::Http::StreamCallbacks
+  //
+
+  virtual void onResetStream(Envoy::Http::StreamResetReason reason,
+                             absl::string_view) override {
+    // TODO test with h2 to see if we get any of these and whether the
+    // connection error handling is enough to handle it.
+    switch (reason) {
+      case Envoy::Http::StreamResetReason::LocalReset:
+        ENVOY_LOG(trace, "ClientStream({}:{}:{}) was locally reset",
+                  connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::LocalRefusedStreamReset:
+        ENVOY_LOG(trace, "ClientStream({}:{}:{}) refused local stream reset",
+                  connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::RemoteReset:
+        ENVOY_LOG(trace, "ClientStream({}:{}:{}) was remotely reset",
+                  connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::RemoteRefusedStreamReset:
+        ENVOY_LOG(trace, "ClientStream({}:{}:{}) refused remote stream reset",
+                  connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::ConnectionFailure:
+        ENVOY_LOG(
+            trace,
+            "ClientStream({}:{}:{}) reset due to initial connection failure",
+            connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::ConnectionTermination:
+        ENVOY_LOG(
+            trace,
+            "ClientStream({}:{}:{}) reset due to underlying connection reset",
+            connection_.name(), connection_.id(), id_);
+        break;
+      case Envoy::Http::StreamResetReason::Overflow:
+        ENVOY_LOG(trace,
+                  "ClientStream({}:{}:{}) reset due to resource overflow",
+                  connection_.name(), connection_.id(), id_);
+        break;
+      default:
+        ENVOY_LOG(trace, "ClientStream({}:{}:{}) reset due to unknown reason",
+                  connection_.name(), connection_.id(), id_);
+        break;
+    }
+  }
+
+  virtual void onAboveWriteBufferHighWatermark() override {
+    // TODO how should this be handled?
+ ENVOY_LOG(trace, "ClientStream({}:{}:{}) above write buffer high watermark", + connection_.name(), connection_.id(), id_); + } + + virtual void onBelowWriteBufferLowWatermark() override { + // TODO how should this be handled? + ENVOY_LOG(trace, "ClientStream({}:{}:{}) below write buffer low watermark", + connection_.name(), connection_.id(), id_); + } + + virtual void sendRequest(const Envoy::Http::HeaderMap &request_headers, + const std::chrono::milliseconds timeout) { + if (connection_.networkConnection().state() != + Envoy::Network::Connection::State::Open) { + ENVOY_LOG(warn, + "ClientStream({}:{}:{})'s underlying connection is not open!", + connection_.name(), connection_.id(), id_); + connection_.removeStream(id_); + // This stream is now destroyed + return; + } + + Envoy::Http::StreamEncoder &encoder = + connection_.httpConnection().newStream(*this); + encoder.getStream().addCallbacks(*this); + + ENVOY_LOG(debug, "ClientStream({}:{}:{}) sending request headers", + connection_.name(), connection_.id(), id_); + encoder.encodeHeaders(request_headers, true); + + timeout_timer_ = connection_.dispatcher().createTimer([this, timeout]() { + ENVOY_LOG( + debug, + "ClientStream({}:{}:{}) timed out after {} msec waiting for response", + connection_.name(), connection_.id(), id_, + static_cast(timeout.count())); + callback_(connection_, nullptr); + connection_.removeStream(id_); + // This stream is now destroyed + }); + timeout_timer_->enableTimer(timeout); + } + + private: + virtual void onEndStream() { + ENVOY_LOG(debug, "ClientStream({}:{}:{}) complete", connection_.name(), + connection_.id(), id_); + callback_(connection_, std::move(response_headers_)); + connection_.removeStream(id_); + // This stream is now destroyed + } + + ClientStream(const ClientStream &) = delete; + + void operator=(const ClientStream &) = delete; + + uint32_t id_; + ClientConnection &connection_; + Envoy::Http::HeaderMapPtr response_headers_{nullptr}; + ClientResponseCallback callback_; + 
Envoy::Event::TimerPtr timeout_timer_{nullptr}; +}; + +class HttpClientReadFilter + : public Envoy::Network::ReadFilter, + Envoy::Logger::Loggable { + public: + HttpClientReadFilter(const std::string name, uint32_t id, + Envoy::Http::ClientConnection &connection) + : name_(name), id_(id), connection_(connection) {} + + virtual ~HttpClientReadFilter() {} + + // + // Envoy::Network::ReadFilter + // + + virtual Envoy::Network::FilterStatus onData(Envoy::Buffer::Instance &data, + bool end_stream) override { + ENVOY_LOG(trace, "ClientConnection({}:{}) got data", name_, id_); + + connection_.dispatch(data); + + if (end_stream) { + // TODO how should this be handled? + ENVOY_LOG(error, "ClientConnection({}:{}) got end stream", name_, id_); + } + + return Envoy::Network::FilterStatus::StopIteration; + } + + virtual Envoy::Network::FilterStatus onNewConnection() override { + return Envoy::Network::FilterStatus::Continue; + } + + virtual void initializeReadFilterCallbacks( + Envoy::Network::ReadFilterCallbacks &) override {} + + private: + HttpClientReadFilter(const HttpClientReadFilter &) = delete; + + void operator=(const HttpClientReadFilter &) = delete; + + std::string name_; + uint32_t id_; + Envoy::Http::ClientConnection &connection_; +}; + +typedef std::unique_ptr HttpClientReadFilterPtr; +typedef std::shared_ptr HttpClientReadFilterSharedPtr; + +class Http1ClientConnection : public ClientConnection { + public: + Http1ClientConnection(Client &client, uint32_t id, + ClientConnectCallback connect_callback, + ClientCloseCallback close_callback, + std::shared_ptr &dispatcher, + Envoy::Network::ClientConnectionPtr network_connection) + : ClientConnection(client, id, connect_callback, close_callback, + dispatcher), + network_connection_(std::move(network_connection)), + http_connection_(*network_connection_, *this), + read_filter_{std::make_shared(client.name(), id, + http_connection_)} { + network_connection_->addReadFilter(read_filter_); + 
network_connection_->addConnectionCallbacks(*this); + } + + virtual ~Http1ClientConnection() {} + + virtual Envoy::Network::ClientConnection &networkConnection() override { + return *network_connection_; + } + + virtual Envoy::Http::ClientConnection &httpConnection() override { + return http_connection_; + } + + private: + Http1ClientConnection(const Http1ClientConnection &) = delete; + + Http1ClientConnection &operator=(const Http1ClientConnection &) = delete; + + Envoy::Network::ClientConnectionPtr network_connection_; + Envoy::Http::Http1::ClientConnectionImpl http_connection_; + HttpClientReadFilterSharedPtr read_filter_; +}; + +static constexpr uint32_t max_request_headers_kb = 2U; + +class Http2ClientConnection : public ClientConnection { + public: + Http2ClientConnection(Client &client, uint32_t id, + ClientConnectCallback connect_callback, + ClientCloseCallback close_callback, + std::shared_ptr &dispatcher, + Envoy::Network::ClientConnectionPtr network_connection) + : ClientConnection(client, id, connect_callback, close_callback, + dispatcher), + stats_(), + settings_(), + network_connection_(std::move(network_connection)), + http_connection_(*network_connection_, *this, stats_, settings_, + max_request_headers_kb), + read_filter_{std::make_shared(client.name(), id, + http_connection_)} { + network_connection_->addReadFilter(read_filter_); + network_connection_->addConnectionCallbacks(*this); + } + + virtual ~Http2ClientConnection() {} + + virtual Envoy::Network::ClientConnection &networkConnection() override { + return *network_connection_; + } + + virtual Envoy::Http::ClientConnection &httpConnection() override { + return http_connection_; + } + + private: + Http2ClientConnection(const Http2ClientConnection &) = delete; + + Http2ClientConnection &operator=(const Http2ClientConnection &) = delete; + + Envoy::Stats::IsolatedStoreImpl stats_; + Envoy::Http::Http2Settings settings_; + Envoy::Network::ClientConnectionPtr network_connection_; + 
Envoy::Http::Http2::ClientConnectionImpl http_connection_; + HttpClientReadFilterSharedPtr read_filter_; +}; + +ClientStream &ClientConnection::newStream(ClientResponseCallback callback) { + std::lock_guard guard(streams_lock_); + + uint32_t id = stream_counter_++; + ClientStreamPtr stream = std::make_unique(id, *this, callback); + ClientStream *raw = stream.get(); + streams_[id] = std::move(stream); + + return *raw; +} + +ClientConnection::ClientConnection( + Client &client, uint32_t id, ClientConnectCallback connect_callback, + ClientCloseCallback close_callback, + std::shared_ptr &dispatcher) + : client_(client), + id_(id), + connect_callback_(connect_callback), + close_callback_(close_callback), + dispatcher_(dispatcher) {} + +ClientConnection::~ClientConnection() { + ENVOY_LOG(trace, "ClientConnection({}:{}) destroyed", client_.name(), id_); +} + +const std::string &ClientConnection::name() const { return client_.name(); } + +uint32_t ClientConnection::id() const { return id_; } + +Envoy::Event::Dispatcher &ClientConnection::dispatcher() { + return *dispatcher_; +}; + +void ClientConnection::removeStream(uint32_t stream_id) { + unsigned long size = 0UL; + + { + std::lock_guard guard(streams_lock_); + streams_.erase(stream_id); + size = streams_.size(); + } + + if (0 == size) { + ENVOY_LOG(debug, "ClientConnection({}:{}) is idle", client_.name(), id_); + if (ClientCallbackResult::CLOSE == + connect_callback_(*this, ClientConnectionState::IDLE)) { + // This will trigger a + // networkConnection().onEvent(Envoy::Network::ConnectionEvent::LocalClose) + networkConnection().close(Envoy::Network::ConnectionCloseType::NoFlush); + } + } +} + +void ClientConnection::onEvent(Envoy::Network::ConnectionEvent event) { + switch (event) { + // properly on connection destruction. 
+ case Envoy::Network::ConnectionEvent::RemoteClose: + if (established_) { + ENVOY_LOG(debug, "ClientConnection({}:{}) closed by peer or reset", + client_.name(), id_); + close_callback_(*this, ClientCloseReason::REMOTE_CLOSE); + } else { + ENVOY_LOG(debug, "ClientConnection({}:{}) cannot connect to peer", + client_.name(), id_); + close_callback_(*this, ClientCloseReason::CONNECT_FAILED); + } + client_.releaseConnection(*this); + // ClientConnection has been destroyed + return; + case Envoy::Network::ConnectionEvent::LocalClose: + ENVOY_LOG(debug, "ClientConnection({}:{}) closed locally", client_.name(), + id_); + close_callback_(*this, ClientCloseReason::LOCAL_CLOSE); + client_.releaseConnection(*this); + // ClientConnection has been destroyed + return; + case Envoy::Network::ConnectionEvent::Connected: + established_ = true; + ENVOY_LOG(debug, "ClientConnection({}:{}) established", client_.name(), + id_); + if (ClientCallbackResult::CLOSE == + connect_callback_(*this, ClientConnectionState::CONNECTED)) { + // This will trigger a + // networkConnection().onEvent(Envoy::Network::ConnectionEvent::LocalClose) + networkConnection().close(Envoy::Network::ConnectionCloseType::NoFlush); + } + break; + default: + ENVOY_LOG(error, "ClientConnection({}:{}) got unknown event", + client_.name(), id_); + }; +} + +void ClientConnection::onAboveWriteBufferHighWatermark() { + ENVOY_LOG(warn, "ClientConnection({}:{}) above write buffer high watermark", + client_.name(), id_); + // TODO how should this be handled? + httpConnection().onUnderlyingConnectionAboveWriteBufferHighWatermark(); +} + +void ClientConnection::onBelowWriteBufferLowWatermark() { + ENVOY_LOG(warn, "ClientConnection({}:{}) below write buffer low watermark", + client_.name(), id_); + // TODO how should this be handled? 
+ httpConnection().onUnderlyingConnectionBelowWriteBufferLowWatermark(); +} + +void ClientConnection::onGoAway() { + ENVOY_LOG(warn, "ClientConnection({}:{}) remote closed", client_.name(), id_); + // TODO how should this be handled? +} + +void ClientConnection::sendRequest(const Envoy::Http::HeaderMap &headers, + ClientResponseCallback callback, + const std::chrono::milliseconds timeout) { + newStream(callback).sendRequest(headers, timeout); +} + +Client::Client(const std::string &name) + : name_(name), + stats_(), + thread_(nullptr), + time_system_(), + api_(Envoy::Thread::ThreadFactorySingleton::get(), stats_, time_system_), + dispatcher_{api_.allocateDispatcher()} {} + +Client::~Client() { + stop(); + ENVOY_LOG(trace, "Client({}) destroyed", name_); +} + +const std::string &Client::name() const { return name_; } + +void Client::connect( + Envoy::Network::TransportSocketFactory &socket_factory, + HttpVersion http_version, + Envoy::Network::Address::InstanceConstSharedPtr &address, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr &sockopts, + ClientConnectCallback connect_cb, ClientCloseCallback close_cb) { + dispatcher_->post([this, &socket_factory, http_version, address, sockopts, + connect_cb, close_cb]() { + Envoy::Network::ClientConnectionPtr connection = + dispatcher_->createClientConnection( + address, nullptr, socket_factory.createTransportSocket(nullptr), + sockopts); + uint32_t id = connection_counter_++; + + ClientConnectionPtr ptr; + if (HttpVersion::HTTP1 == http_version) { + ptr = std::make_unique( + *this, id, connect_cb, close_cb, dispatcher_, std::move(connection)); + } else { + ptr = std::make_unique( + *this, id, connect_cb, close_cb, dispatcher_, std::move(connection)); + } + ClientConnection *raw = ptr.get(); + + { + std::lock_guard guard(connections_lock_); + connections_[id] = std::move(ptr); + } + + ENVOY_LOG(debug, "ClientConnection({}:{}) connecting to {}", name_, id, + address->asString()); + 
raw->networkConnection().connect(); + }); +} + +void Client::start() { + std::promise promise; + + if (is_running_) { + return; + } + + thread_ = api_.threadFactory().createThread([this, &promise]() { + ENVOY_LOG(debug, "Client({}) dispatcher started", name_); + + is_running_ = true; + promise.set_value(true); // do not use promise again after this + while (is_running_) { + dispatcher_->run(Envoy::Event::Dispatcher::RunType::NonBlock); + } + + ENVOY_LOG(debug, "Client({}) dispatcher stopped", name_); + }); + + promise.get_future().get(); +} + +void Client::stop() { + ENVOY_LOG(debug, "Client({}) stop requested", name_); + + is_running_ = false; + if (thread_) { + thread_->join(); + thread_ = nullptr; + } + + ENVOY_LOG(debug, "Client({}) stopped", name_); +} + +void Client::releaseConnection(uint32_t id) { + size_t erased = 0; + { + std::lock_guard guard(connections_lock_); + dispatcher_->deferredDelete(std::move(connections_[id])); + erased = connections_.erase(id); + } + if (1 > erased) { + ENVOY_LOG(error, "Client({}) cannot remove ClientConnection({}:{})", name_, + name_, id); + } +} + +void Client::releaseConnection(ClientConnection &connection) { + releaseConnection(connection.id()); +} + +LoadGenerator::LoadGenerator( + Client &client, Envoy::Network::TransportSocketFactory &socket_factory, + HttpVersion http_version, + Envoy::Network::Address::InstanceConstSharedPtr &address, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr &sockopts) + : client_(client), + socket_factory_(socket_factory), + http_version_(http_version), + address_(address), + sockopts_(sockopts) { + response_callback_ = [this](ClientConnection &connection, + Envoy::Http::HeaderMapPtr response) { + if (!response) { + ENVOY_LOG(debug, "Connection({}:{}) timedout waiting for response", + connection.name(), connection.id()); + ++response_timeouts_; + return; + } + + ++responses_received_; + + uint64_t status = 0; + if (!Envoy::StringUtil::atoull(response->Status()->value().c_str(), + 
status)) { + ENVOY_LOG(error, "Connection({}:{}) received response with bad status", + connection.name(), connection.id()); + } else if (200 <= status && status < 300) { + ++class_2xx_; + } else if (400 <= status && status < 500) { + ++class_4xx_; + } else if (500 <= status && status < 600) { + ++class_5xx_; + } + + if (0 >= requests_remaining_--) { + // Break if we've already sent or scheduled every request we wanted to + return; + } + + connection.sendRequest(*request_, response_callback_, timeout_); + }; + + connect_callback_ = [this]( + ClientConnection &connection, + ClientConnectionState state) -> ClientCallbackResult { + if (state == ClientConnectionState::IDLE) { + // This will result in a CloseReason::LOCAL_CLOSE passed to the + // close_callback + return ClientCallbackResult::CLOSE; + } + // If ConnectionResult::SUCCESS: + + ++connect_successes_; + + if (0 >= requests_remaining_--) { + // This will result in a ConnectionState::IDLE passed to this callback + // once all active streams have finished. + return ClientCallbackResult::CONTINUE; + } + + connection.sendRequest(*request_, response_callback_, timeout_); + + return ClientCallbackResult::CONTINUE; + }; + + close_callback_ = [this](ClientConnection &, ClientCloseReason reason) { + switch (reason) { + case ClientCloseReason::CONNECT_FAILED: + ++connect_failures_; + break; + case ClientCloseReason::REMOTE_CLOSE: + ++remote_closes_; + break; + case ClientCloseReason::LOCAL_CLOSE: + // We initiated this by responding to ConnectionState::IDLE with a + // CallbackResult::Close + ++local_closes_; + break; + } + + // Unblock run() once we've seen a close for every connection initiated. 
+ if (remote_closes_ + local_closes_ + connect_failures_ >= + connections_to_initiate_) { + promise_all_connections_closed_.set_value(true); + } + }; +} + +LoadGenerator::~LoadGenerator() {} + +void LoadGenerator::run(uint32_t connections, uint32_t requests, + Envoy::Http::HeaderMapPtr request, + const std::chrono::milliseconds timeout) { + connections_to_initiate_ = connections; + requests_to_send_ = requests; + request_ = std::move(request); + promise_all_connections_closed_ = std::promise(); + timeout_ = timeout; + requests_remaining_ = requests_to_send_; + connect_failures_ = 0; + connect_successes_ = 0; + responses_received_ = 0; + response_timeouts_ = 0; + local_closes_ = 0; + remote_closes_ = 0; + class_2xx_ = 0; + class_4xx_ = 0; + class_5xx_ = 0; + + client_.start(); // idempotent + + for (uint32_t i = 0; i < connections_to_initiate_; ++i) { + client_.connect(socket_factory_, http_version_, address_, sockopts_, + connect_callback_, close_callback_); + } + + promise_all_connections_closed_.get_future().get(); +} + +uint32_t LoadGenerator::connectFailures() const { return connect_failures_; } +uint32_t LoadGenerator::connectSuccesses() const { return connect_successes_; } +uint32_t LoadGenerator::responsesReceived() const { + return responses_received_; +} +uint32_t LoadGenerator::responseTimeouts() const { return response_timeouts_; } +uint32_t LoadGenerator::localCloses() const { return local_closes_; } +uint32_t LoadGenerator::remoteCloses() const { return remote_closes_; } +uint32_t LoadGenerator::class2xxResponses() const { return class_2xx_; } +uint32_t LoadGenerator::class4xxResponses() const { return class_4xx_; } +uint32_t LoadGenerator::class5xxResponses() const { return class_5xx_; } + +} // namespace Integration +} // namespace Mixer diff --git a/test/integration/int_client.h b/test/integration/int_client.h new file mode 100644 index 00000000000..2a243c98784 --- /dev/null +++ b/test/integration/int_client.h @@ -0,0 +1,316 @@ +/* Copyright 2019 
Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include +#include "common/api/api_impl.h" +#include "common/common/thread.h" +#include "common/network/raw_buffer_socket.h" +#include "common/stats/isolated_store_impl.h" +#include "envoy/api/api.h" +#include "envoy/event/dispatcher.h" +#include "envoy/http/codec.h" +#include "envoy/network/address.h" +#include "envoy/thread/thread.h" +#include "fmt/printf.h" +#include "test/test_common/test_time.h" +#include "test/test_common/utility.h" + +namespace Mixer { +namespace Integration { +enum class HttpVersion { HTTP1, HTTP2 }; + +class ClientStream; +class ClientConnection; +class Client; +typedef std::unique_ptr ClientStreamPtr; +typedef std::shared_ptr ClientStreamSharedPtr; +typedef std::unique_ptr ClientConnectionPtr; +typedef std::shared_ptr ClientConnectionSharedPtr; +typedef std::unique_ptr ClientPtr; +typedef std::shared_ptr ClientSharedPtr; + +enum class ClientConnectionState { + CONNECTED, // Connection established. Non-Terminal. Will be followed by one + // of the codes below. + IDLE, // Connection has no active streams. Non-Terminal. Close it, use it, + // or put it in a pool. +}; + +enum class ClientCloseReason { + CONNECT_FAILED, // Connection could not be established + REMOTE_CLOSE, // Peer closed or connection was reset after it was + // established. + LOCAL_CLOSE // This process decided to close the connection. 
+}; + +enum class ClientCallbackResult { + CONTINUE, // Leave the connection open + CLOSE // Close the connection. +}; + +/** + * Handle a non-terminal connection event asynchronously. + * + * @param connection The connection with the event + * @param state The state of the connection (connected or idle). + */ +typedef std::function + ClientConnectCallback; + +/** + * Handle a terminal connection close event asynchronously. + * + * @param connection The connection that was closed + * @param reason The reason the connection was closed + */ +typedef std::function + ClientCloseCallback; + +/** + * Handle a response asynchronously. + * + * @param connection The connection that received the response. + * @param response_headers The response headers or null if timed out. + */ +typedef std::function + ClientResponseCallback; + +class ClientConnection + : public Envoy::Network::ConnectionCallbacks, + public Envoy::Http::ConnectionCallbacks, + public Envoy::Event::DeferredDeletable, + protected Envoy::Logger::Loggable { + public: + ClientConnection(Client &client, uint32_t id, + ClientConnectCallback connect_callback, + ClientCloseCallback close_callback, + std::shared_ptr &dispatcher); + + virtual ~ClientConnection(); + + const std::string &name() const; + + uint32_t id() const; + + virtual Envoy::Network::ClientConnection &networkConnection() PURE; + + virtual Envoy::Http::ClientConnection &httpConnection() PURE; + + Envoy::Event::Dispatcher &dispatcher(); + + /** + * Asynchronously send a request. On HTTP1.1 connections at most one request + * can be outstanding on a connection. For HTTP2 multiple requests may + * outstanding. 
+ * + * @param request_headers + * @param callback + */ + virtual void sendRequest(const Envoy::Http::HeaderMap &request_headers, + ClientResponseCallback callback, + const std::chrono::milliseconds timeout = + std::chrono::milliseconds(5'000)); + + /** + * For internal use + * + * @param stream_id + */ + void removeStream(uint32_t stream_id); + + // + // Envoy::Network::ConnectionCallbacks + // + + virtual void onEvent(Envoy::Network::ConnectionEvent event) override; + + virtual void onAboveWriteBufferHighWatermark() override; + + virtual void onBelowWriteBufferLowWatermark() override; + + // + // Envoy::Http::ConnectionCallbacks + // + + virtual void onGoAway() override; + + private: + ClientConnection(const ClientConnection &) = delete; + + ClientConnection &operator=(const ClientConnection &) = delete; + + ClientStream &newStream(ClientResponseCallback callback); + + Client &client_; + uint32_t id_; + ClientConnectCallback connect_callback_; + ClientCloseCallback close_callback_; + std::shared_ptr dispatcher_; + bool established_{false}; + + std::mutex streams_lock_; + std::unordered_map streams_; + std::atomic stream_counter_{0U}; +}; + +class Client : Envoy::Logger::Loggable { + public: + Client(const std::string &name); + + virtual ~Client(); + + const std::string &name() const; + + /** + * Start the client's dispatcher in a background thread. This is a noop if + * the client has already been started. This will block until the dispatcher + * is running on another thread. + */ + void start(); + + /** + * Stop the client's dispatcher and join the background thread. This will + * block until the background thread exits. + */ + void stop(); + + /** + * For internal use + */ + void releaseConnection(uint32_t id); + + /** + * For internal use + */ + void releaseConnection(ClientConnection &connection); + + /** + * Asynchronously connect to a peer. 
The connect_callback will be called on + * successful connection establishment and also on idle state, giving the + * caller the opportunity to reuse or close connections. The close_callback + * will be called after the connection is closed, giving the caller the + * opportunity to cleanup additional resources, etc. + */ + void connect( + Envoy::Network::TransportSocketFactory &socket_factory, + HttpVersion http_version, + Envoy::Network::Address::InstanceConstSharedPtr &address, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr &sockopts, + ClientConnectCallback connect_callback, + ClientCloseCallback close_callback); + + private: + Client(const Client &) = delete; + + Client &operator=(const Client &) = delete; + + std::atomic is_running_{false}; + std::string name_; + Envoy::Stats::IsolatedStoreImpl stats_; + Envoy::Thread::ThreadPtr thread_; + Envoy::Event::TestRealTimeSystem time_system_; + Envoy::Api::Impl api_; + std::shared_ptr dispatcher_; + + std::mutex connections_lock_; + std::unordered_map connections_; + uint32_t connection_counter_{0U}; +}; + +class LoadGenerator : Envoy::Logger::Loggable { + public: + /** + * A wrapper around Client and its callbacks that implements a simple load + * generator. + * + * @param socket_factory Socket factory (use for plain TCP vs. TLS) + * @param http_version HTTP version (h1 vs h2) + * @param address Address (ip addr, port, ip protocol version) to connect to + * @param sockopts Socket options for the client sockets. Use default if + * null. + */ + LoadGenerator(Client &client, + Envoy::Network::TransportSocketFactory &socket_factory, + HttpVersion http_version, + Envoy::Network::Address::InstanceConstSharedPtr &address, + const Envoy::Network::ConnectionSocket::OptionsSharedPtr + &sockopts = nullptr); + + virtual ~LoadGenerator(); + + /** + * Generate load and block until all connections have finished (successfully + * or otherwise). 
+ * + * @param connections Connections to create + * @param requests Total requests across all connections to send + * @param request The request to send + * @param timeout The time in msec to wait to receive a response after sending + * each request. + */ + void run(uint32_t connections, uint32_t requests, + Envoy::Http::HeaderMapPtr request, + const std::chrono::milliseconds timeout = + std::chrono::milliseconds(5'000)); + + uint32_t connectFailures() const; + uint32_t connectSuccesses() const; + uint32_t responsesReceived() const; + uint32_t responseTimeouts() const; + uint32_t localCloses() const; + uint32_t remoteCloses() const; + uint32_t class2xxResponses() const; + uint32_t class4xxResponses() const; + uint32_t class5xxResponses() const; + + private: + LoadGenerator(const LoadGenerator &) = delete; + void operator=(const LoadGenerator &) = delete; + + uint32_t connections_to_initiate_{0}; + uint32_t requests_to_send_{0}; + Envoy::Http::HeaderMapPtr request_{}; + Client &client_; + Envoy::Network::TransportSocketFactory &socket_factory_; + HttpVersion http_version_; + Envoy::Network::Address::InstanceConstSharedPtr address_; + const Envoy::Network::ConnectionSocket::OptionsSharedPtr sockopts_; + + ClientConnectCallback connect_callback_; + ClientResponseCallback response_callback_; + ClientCloseCallback close_callback_; + std::chrono::milliseconds timeout_{std::chrono::milliseconds(0)}; + std::atomic requests_remaining_{0}; + std::atomic connect_failures_{0}; + std::atomic connect_successes_{0}; + std::atomic responses_received_{0}; + std::atomic response_timeouts_{0}; + std::atomic local_closes_{0}; + std::atomic remote_closes_{0}; + std::atomic class_2xx_{0}; + std::atomic class_4xx_{0}; + std::atomic class_5xx_{0}; + std::promise promise_all_connections_closed_; +}; + +typedef std::unique_ptr LoadGeneratorPtr; + +} // namespace Integration +} // namespace Mixer \ No newline at end of file diff --git a/test/integration/int_client_server_test.cc 
b/test/integration/int_client_server_test.cc new file mode 100644 index 00000000000..b229d7f5b74 --- /dev/null +++ b/test/integration/int_client_server_test.cc @@ -0,0 +1,379 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include "common/network/utility.h" +#include "gtest/gtest.h" +#include "int_client.h" +#include "int_server.h" +#include "test/test_common/network_utility.h" + +namespace Mixer { +namespace Integration { + +class ClientServerTest : public testing::Test, + Envoy::Logger::Loggable { + public: + ClientServerTest() + : transport_socket_factory_(), + ip_version_(Envoy::Network::Address::IpVersion::v4), + listening_socket_( + Envoy::Network::Utility::parseInternetAddressAndPort(fmt::format( + "{}:{}", + Envoy::Network::Test::getAnyAddressUrlString(ip_version_), 0)), + nullptr, true), + client_("client"), + server_("server", listening_socket_, transport_socket_factory_, + Envoy::Http::CodecClient::Type::HTTP1) {} + + protected: + Envoy::Network::RawBufferSocketFactory transport_socket_factory_; + Envoy::Network::Address::IpVersion ip_version_; + Envoy::Network::TcpListenSocket listening_socket_; + Client client_; + Server server_; +}; + +TEST_F(ClientServerTest, HappyPath) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // 
Server Setup + // + + ServerCallbackHelper server_callbacks; // sends a 200 OK to everything + server_.start(server_callbacks); + + // + // Client setup + // + + Envoy::Network::Address::InstanceConstSharedPtr address = + listening_socket_.localAddress(); + LoadGenerator load_generator(client_, transport_socket_factory_, + HttpVersion::HTTP1, address); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + load_generator.run(connections_to_initiate, requests_to_send, + std::move(request)); + + // wait until the server has closed all connections created by the client + server_callbacks.wait(load_generator.connectSuccesses()); + + // + // Evaluate test + // + + // All client connections are successfully established. + EXPECT_EQ(load_generator.connectSuccesses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.connectFailures()); + // Client close callback called for every client connection. + EXPECT_EQ(load_generator.localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(load_generator.responsesReceived(), requests_to_send); + // Every response was a 2xx class + EXPECT_EQ(load_generator.class2xxResponses(), requests_to_send); + EXPECT_EQ(0, load_generator.class4xxResponses()); + EXPECT_EQ(0, load_generator.class5xxResponses()); + // No client sockets are rudely closed by server / no client sockets are + // reset. + EXPECT_EQ(0, load_generator.remoteCloses()); + EXPECT_EQ(0, load_generator.responseTimeouts()); + + // Server accept callback is called for every client connection initiated. 
+ EXPECT_EQ(server_callbacks.connectionsAccepted(), connections_to_initiate); + // Server request callback is called for every client request sent + EXPECT_EQ(server_callbacks.requestsReceived(), requests_to_send); + // Server does not close its own sockets but instead relies on the client to + // initiate the close + EXPECT_EQ(0, server_callbacks.localCloses()); + // Server sees a client-initiated close for every socket it accepts + EXPECT_EQ(server_callbacks.remoteCloses(), + server_callbacks.connectionsAccepted()); +} + +TEST_F(ClientServerTest, AcceptAndClose) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Server Setup + // + + // Immediately close any connection accepted. + ServerCallbackHelper server_callbacks( + [](ServerConnection &, ServerStream &, Envoy::Http::HeaderMapPtr &&) { + GTEST_FATAL_FAILURE_( + "Connections immediately closed so no response should be received"); + }, + [](ServerConnection &) -> ServerCallbackResult { + return ServerCallbackResult::CLOSE; + }); + + server_.start(server_callbacks); + + // + // Client setup + // + + Envoy::Network::Address::InstanceConstSharedPtr address = + listening_socket_.localAddress(); + LoadGenerator load_generator(client_, transport_socket_factory_, + HttpVersion::HTTP1, address); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + load_generator.run(connections_to_initiate, requests_to_send, + std::move(request)); + + // wait until the server has closed all connections created by the client + server_callbacks.wait(load_generator.connectSuccesses()); + + // + // Evaluate test + // + + // Assert that all connections succeed but no responses are received and the + // server closes the connections.
+ EXPECT_EQ(load_generator.connectSuccesses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.connectFailures()); + EXPECT_EQ(load_generator.remoteCloses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.localCloses()); + EXPECT_EQ(0, load_generator.responsesReceived()); + EXPECT_EQ(0, load_generator.class2xxResponses()); + EXPECT_EQ(0, load_generator.class4xxResponses()); + EXPECT_EQ(0, load_generator.class5xxResponses()); + EXPECT_EQ(0, load_generator.responseTimeouts()); + + // Server accept callback is called for every client connection initiated. + EXPECT_EQ(server_callbacks.connectionsAccepted(), connections_to_initiate); + // Server request callback is never called + EXPECT_EQ(0, server_callbacks.requestsReceived()); + // Server closes every connection + EXPECT_EQ(server_callbacks.connectionsAccepted(), + server_callbacks.localCloses()); + EXPECT_EQ(0, server_callbacks.remoteCloses()); +} + +TEST_F(ClientServerTest, SlowResponse) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Server Setup + // + + // Take a really long time (500 msec) to send a 200 OK response. 
+ ServerCallbackHelper server_callbacks([](ServerConnection &, + ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + Envoy::Http::TestHeaderMapImpl response{{":status", "200"}}; + stream.sendResponseHeaders(response, std::chrono::milliseconds(500)); + }); + + server_.start(server_callbacks); + + // + // Client setup + // + + Envoy::Network::Address::InstanceConstSharedPtr address = + listening_socket_.localAddress(); + LoadGenerator load_generator(client_, transport_socket_factory_, + HttpVersion::HTTP1, address); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + load_generator.run(connections_to_initiate, requests_to_send, + std::move(request), std::chrono::milliseconds(250)); + + // wait until the server has closed all connections created by the client + server_callbacks.wait(load_generator.connectSuccesses()); + + // + // Evaluate test + // + + // Assert that all connections succeed but all responses timeout leading to + // local closing of all connections. + EXPECT_EQ(load_generator.connectSuccesses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.connectFailures()); + EXPECT_EQ(load_generator.responseTimeouts(), connections_to_initiate); + EXPECT_EQ(load_generator.localCloses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.remoteCloses()); + EXPECT_EQ(0, load_generator.responsesReceived()); + EXPECT_EQ(0, load_generator.class2xxResponses()); + EXPECT_EQ(0, load_generator.class4xxResponses()); + EXPECT_EQ(0, load_generator.class5xxResponses()); + + // Server accept callback is called for every client connection initiated. 
+ EXPECT_EQ(server_callbacks.connectionsAccepted(), connections_to_initiate); + // Server receives a request on each connection + EXPECT_EQ(server_callbacks.requestsReceived(), connections_to_initiate); + // Server sees that the client closes each connection after it gives up + EXPECT_EQ(server_callbacks.connectionsAccepted(), + server_callbacks.remoteCloses()); + EXPECT_EQ(0, server_callbacks.localCloses()); +} + +TEST_F(ClientServerTest, NoServer) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // Create a listening socket bound to an ephemeral port picked by the kernel, + // but don't create a server to call listen() on it. Result will be + // ECONNREFUSEDs and we won't accidentally send connects to another process. + + Envoy::Network::TcpListenSocket listening_socket( + Envoy::Network::Utility::parseInternetAddressAndPort(fmt::format( + "{}:{}", Envoy::Network::Test::getAnyAddressUrlString(ip_version_), + 0)), + nullptr, true); + uint16_t port = + static_cast(listening_socket.localAddress()->ip()->port()); + + Envoy::Network::Address::InstanceConstSharedPtr address = + Envoy::Network::Utility::parseInternetAddress("127.0.0.1", port); + + // + // Client setup + // + + LoadGenerator load_generator(client_, transport_socket_factory_, + HttpVersion::HTTP1, address); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + load_generator.run(connections_to_initiate, requests_to_send, + std::move(request)); + + // + // Evaluate test + // + + // All client connections fail + EXPECT_EQ(load_generator.connectFailures(), connections_to_initiate); + // Nothing else happened + EXPECT_EQ(0, load_generator.connectSuccesses()); + EXPECT_EQ(0, load_generator.localCloses()); + 
EXPECT_EQ(0, load_generator.responseTimeouts()); + EXPECT_EQ(0, load_generator.responsesReceived()); + EXPECT_EQ(0, load_generator.class2xxResponses()); + EXPECT_EQ(0, load_generator.class4xxResponses()); + EXPECT_EQ(0, load_generator.class5xxResponses()); + EXPECT_EQ(0, load_generator.remoteCloses()); +} + +TEST_F(ClientServerTest, NoAccept) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::info); + + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Server Setup + // + + ServerCallbackHelper server_callbacks; // sends a 200 OK to everything + server_.start(server_callbacks); + + // but don't call accept() on the listening socket + server_.stopAcceptingConnections(); + + // + // Client setup + // + + Envoy::Network::Address::InstanceConstSharedPtr address = + listening_socket_.localAddress(); + LoadGenerator load_generator(client_, transport_socket_factory_, + HttpVersion::HTTP1, address); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + load_generator.run(connections_to_initiate, requests_to_send, + std::move(request), std::chrono::milliseconds(250)); + + // + // Evaluate test + // + + // Assert that all connections succeed but all responses timeout leading to + // local closing of all connections. 
+ EXPECT_EQ(load_generator.connectSuccesses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.connectFailures()); + EXPECT_EQ(load_generator.responseTimeouts(), connections_to_initiate); + EXPECT_EQ(load_generator.localCloses(), connections_to_initiate); + EXPECT_EQ(0, load_generator.remoteCloses()); + EXPECT_EQ(0, load_generator.responsesReceived()); + EXPECT_EQ(0, load_generator.class2xxResponses()); + EXPECT_EQ(0, load_generator.class4xxResponses()); + EXPECT_EQ(0, load_generator.class5xxResponses()); + + // From the server point of view, nothing happened + EXPECT_EQ(0, server_callbacks.connectionsAccepted()); + EXPECT_EQ(0, server_callbacks.requestsReceived()); + EXPECT_EQ(0, server_callbacks.connectionsAccepted()); + EXPECT_EQ(0, server_callbacks.remoteCloses()); + EXPECT_EQ(0, server_callbacks.localCloses()); +} + +} // namespace Integration +} // namespace Mixer diff --git a/test/integration/int_server.cc b/test/integration/int_server.cc new file mode 100644 index 00000000000..270ee5a293a --- /dev/null +++ b/test/integration/int_server.cc @@ -0,0 +1,817 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "int_server.h" +#include +#include "common/common/lock_guard.h" +#include "common/common/logger.h" +#include "common/grpc/codec.h" +#include "common/http/conn_manager_config.h" +#include "common/http/conn_manager_impl.h" +#include "common/http/exception.h" +#include "common/http/http1/codec_impl.h" +#include "common/http/http2/codec_impl.h" +#include "common/network/listen_socket_impl.h" +#include "common/network/raw_buffer_socket.h" +#include "envoy/http/codec.h" +#include "envoy/network/transport_socket.h" +#include "fmt/printf.h" +#include "server/connection_handler_impl.h" +#include "test/test_common/network_utility.h" +#include "test/test_common/utility.h" + +namespace Mixer { +namespace Integration { + +static Envoy::Http::LowerCaseString RequestId(std::string("x-request-id")); + +ServerStream::ServerStream() {} + +ServerStream::~ServerStream() {} + +class ServerStreamImpl : public ServerStream, + public Envoy::Http::StreamDecoder, + public Envoy::Http::StreamCallbacks, + Envoy::Logger::Loggable { + public: + ServerStreamImpl(uint32_t id, ServerConnection &connection, + ServerRequestCallback request_callback, + Envoy::Http::StreamEncoder &stream_encoder) + : id_(id), + connection_(connection), + request_callback_(request_callback), + stream_encoder_(stream_encoder) {} + + virtual ~ServerStreamImpl() { + ENVOY_LOG(trace, "ServerStream({}:{}:{}) destroyed", connection_.name(), + connection_.id(), id_); + } + + ServerStreamImpl(ServerStreamImpl &&) = default; + ServerStreamImpl &operator=(ServerStreamImpl &&) = default; + + // + // ServerStream + // + + virtual void sendResponseHeaders( + const Envoy::Http::HeaderMap &response_headers, + const std::chrono::milliseconds delay) override { + if (connection_.networkConnection().state() != + Envoy::Network::Connection::State::Open) { + ENVOY_LOG(warn, + "ServerStream({}:{}:{})'s underlying connection is not open!", + connection_.name(), connection_.id(), id_); + // TODO return error to caller + 
return; + } + + if (delay <= std::chrono::milliseconds(0)) { + ENVOY_LOG(debug, "ServerStream({}:{}:{}) sending response headers", + connection_.name(), connection_.id(), id_); + stream_encoder_.encodeHeaders(response_headers, true); + return; + } + + // Limitation: at most one response can be sent on a stream at a time. + assert(nullptr == delay_timer_.get()); + if (delay_timer_.get()) { + return; + } + + response_headers_ = + std::make_unique(response_headers); + delay_timer_ = connection_.dispatcher().createTimer([this, delay]() { + ENVOY_LOG( + debug, + "ServerStream({}:{}:{}) sending response headers after {} msec delay", + connection_.name(), connection_.id(), id_, + static_cast(delay.count())); + stream_encoder_.encodeHeaders(*response_headers_, true); + delay_timer_->disableTimer(); + delay_timer_ = nullptr; + response_headers_ = nullptr; + }); + delay_timer_->enableTimer(delay); + } + + virtual void sendGrpcResponse( + Envoy::Grpc::Status::GrpcStatus status, + const Envoy::Protobuf::Message &message, + const std::chrono::milliseconds delay) override { + // Limitation: at most one response can be sent on a stream at a time. 
+ assert(nullptr == delay_timer_.get()); + if (delay_timer_.get()) { + return; + } + + response_status_ = status; + response_body_ = Envoy::Grpc::Common::serializeBody(message); + Envoy::Event::TimerCb send_grpc_response = [this, delay]() { + ENVOY_LOG( + debug, + "ServerStream({}:{}:{}) sending gRPC response after {} msec delay", + connection_.name(), connection_.id(), id_, + static_cast(delay.count())); + stream_encoder_.encodeHeaders( + Envoy::Http::TestHeaderMapImpl{{":status", "200"}}, false); + stream_encoder_.encodeData(*response_body_, false); + stream_encoder_.encodeTrailers(Envoy::Http::TestHeaderMapImpl{ + {"grpc-status", + std::to_string(static_cast(response_status_))}}); + }; + + if (delay <= std::chrono::milliseconds(0)) { + send_grpc_response(); + return; + } + + delay_timer_ = + connection_.dispatcher().createTimer([this, send_grpc_response]() { + send_grpc_response(); + delay_timer_->disableTimer(); + }); + + delay_timer_->enableTimer(delay); + } + + // + // Envoy::Http::StreamDecoder + // + + virtual void decode100ContinueHeaders(Envoy::Http::HeaderMapPtr &&) override { + ENVOY_LOG(error, "ServerStream({}:{}:{}) got continue headers?!?!", + connection_.name(), connection_.id(), id_); + } + + /** + * Called with decoded headers, optionally indicating end of stream. + * @param headers supplies the decoded headers map that is moved into the + * callee. + * @param end_stream supplies whether this is a header only request/response. 
+ */ + virtual void decodeHeaders(Envoy::Http::HeaderMapPtr &&headers, + bool end_stream) override { + ENVOY_LOG(debug, "ServerStream({}:{}:{}) got request headers", + connection_.name(), connection_.id(), id_); + + request_headers_ = std::move(headers); + + /* TODO use x-request-id for e2e logging + * + const Envoy::Http::HeaderEntry *header = + request_headers_->get(RequestId); + + if (header) { + request_id_ = header->value().c_str(); + } + */ + + if (end_stream) { + onEndStream(); + // stream is now destroyed + } + } + + virtual void decodeData(Envoy::Buffer::Instance &, bool end_stream) override { + ENVOY_LOG(debug, "ServerStream({}:{}:{}) got request body data", + connection_.name(), connection_.id(), id_); + + if (end_stream) { + onEndStream(); + // stream is now destroyed + } + } + + virtual void decodeTrailers(Envoy::Http::HeaderMapPtr &&) override { + ENVOY_LOG(trace, "ServerStream({}:{}:{}) got request trailers", + connection_.name(), connection_.id(), id_); + onEndStream(); + // stream is now destroyed + } + + virtual void decodeMetadata(Envoy::Http::MetadataMapPtr &&) override { + ENVOY_LOG(trace, "ServerStream({}:{}):{} got metadata", connection_.name(), + connection_.id(), id_); + } + + // + // Envoy::Http::StreamCallbacks + // + + virtual void onResetStream(Envoy::Http::StreamResetReason reason, + absl::string_view) override { + // TODO test with h2 to see if we get these and whether the connection error + // handling is enough to handle it. 
+ switch (reason) { + case Envoy::Http::StreamResetReason::LocalReset: + ENVOY_LOG(trace, "ServerStream({}:{}:{}) was locally reset", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::LocalRefusedStreamReset: + ENVOY_LOG(trace, "ServerStream({}:{}:{}) refused local stream reset", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::RemoteReset: + ENVOY_LOG(trace, "ServerStream({}:{}:{}) was remotely reset", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::RemoteRefusedStreamReset: + ENVOY_LOG(trace, "ServerStream({}:{}:{}) refused remote stream reset", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::ConnectionFailure: + ENVOY_LOG( + trace, + "ServerStream({}:{}:{}) reset due to initial connection failure", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::ConnectionTermination: + ENVOY_LOG( + trace, + "ServerStream({}:{}:{}) reset due to underlying connection reset", + connection_.name(), connection_.id(), id_); + break; + case Envoy::Http::StreamResetReason::Overflow: + ENVOY_LOG(trace, + "ServerStream({}:{}:{}) reset due to resource overflow", + connection_.name(), connection_.id(), id_); + break; + default: + ENVOY_LOG(trace, "ServerStream({}:{}:{}) reset due to unknown reason", + connection_.name(), connection_.id(), id_); + break; + } + } + + virtual void onAboveWriteBufferHighWatermark() override { + // TODO is there anything to be done here? + ENVOY_LOG(trace, "ServerStream({}:{}:{}) above write buffer high watermark", + connection_.name(), connection_.id(), id_); + } + + virtual void onBelowWriteBufferLowWatermark() override { + // TODO is there anything to be done here?
+ ENVOY_LOG(trace, "ServerStream({}:{}:{}) below write buffer low watermark", + connection_.name(), connection_.id(), id_); + } + + private: + virtual void onEndStream() { + ENVOY_LOG(debug, "ServerStream({}:{}:{}) complete", connection_.name(), + connection_.id(), id_); + request_callback_(connection_, *this, std::move(request_headers_)); + + connection_.removeStream(id_); + // This stream is now destroyed + } + + ServerStreamImpl(const ServerStreamImpl &) = delete; + + ServerStreamImpl &operator=(const ServerStreamImpl &) = delete; + + uint32_t id_; + ServerConnection &connection_; + Envoy::Http::HeaderMapPtr request_headers_{nullptr}; + Envoy::Http::HeaderMapPtr response_headers_{nullptr}; + Envoy::Buffer::InstancePtr response_body_{nullptr}; + Envoy::Grpc::Status::GrpcStatus response_status_{Envoy::Grpc::Status::Ok}; + ServerRequestCallback request_callback_; + Envoy::Http::StreamEncoder &stream_encoder_; + Envoy::Event::TimerPtr delay_timer_{nullptr}; +}; + +ServerConnection::ServerConnection( + const std::string &name, uint32_t id, + ServerRequestCallback request_callback, ServerCloseCallback close_callback, + Envoy::Network::Connection &network_connection, + Envoy::Event::Dispatcher &dispatcher, + Envoy::Http::CodecClient::Type http_type, Envoy::Stats::Scope &scope) + : name_(name), + id_(id), + network_connection_(network_connection), + dispatcher_(dispatcher), + request_callback_(request_callback), + close_callback_(close_callback) { + // TODO make use of network_connection_->socketOptions() and possibly http + // settings; + constexpr uint32_t max_request_headers_kb = 2U; + + switch (http_type) { + case Envoy::Http::CodecClient::Type::HTTP1: + http_connection_ = + std::make_unique( + network_connection, *this, Envoy::Http::Http1Settings(), + max_request_headers_kb); + break; + case Envoy::Http::CodecClient::Type::HTTP2: { + Envoy::Http::Http2Settings settings; + settings.allow_connect_ = true; + settings.allow_metadata_ = true; + http_connection_ = + 
std::make_unique( + network_connection, *this, scope, settings, + max_request_headers_kb); + } break; + default: + ENVOY_LOG(error, + "ServerConnection({}:{}) doesn't support http type %d, " + "defaulting to HTTP1", + name_, id_, static_cast(http_type) + 1); + http_connection_ = + std::make_unique( + network_connection, *this, Envoy::Http::Http1Settings(), + max_request_headers_kb); + break; + } +} + +ServerConnection::~ServerConnection() { + ENVOY_LOG(trace, "ServerConnection({}:{}) destroyed", name_, id_); +} + +const std::string &ServerConnection::name() const { return name_; } + +uint32_t ServerConnection::id() const { return id_; } + +Envoy::Network::Connection &ServerConnection::networkConnection() { + return network_connection_; +} + +const Envoy::Network::Connection &ServerConnection::networkConnection() const { + return network_connection_; +} + +Envoy::Http::ServerConnection &ServerConnection::httpConnection() { + return *http_connection_; +} + +const Envoy::Http::ServerConnection &ServerConnection::httpConnection() const { + return *http_connection_; +} + +Envoy::Event::Dispatcher &ServerConnection::dispatcher() { return dispatcher_; } + +Envoy::Network::FilterStatus ServerConnection::onData( + Envoy::Buffer::Instance &data, bool end_stream) { + ENVOY_LOG(trace, "ServerConnection({}:{}) got data", name_, id_); + + try { + http_connection_->dispatch(data); + } catch (const Envoy::Http::CodecProtocolException &e) { + ENVOY_LOG(error, "ServerConnection({}:{}) received the wrong protocol: {}", + name_, id_, e.what()); + network_connection_.close(Envoy::Network::ConnectionCloseType::NoFlush); + return Envoy::Network::FilterStatus::StopIteration; + } + + if (end_stream) { + ENVOY_LOG(error, + "ServerConnection({}:{}) got end stream - TODO relay to all " + "active streams?!?", + name_, id_); + } + + return Envoy::Network::FilterStatus::StopIteration; +} + +Envoy::Network::FilterStatus ServerConnection::onNewConnection() { + ENVOY_LOG(trace, 
"ServerConnection({}:{}) onNewConnection", name_, id_); + return Envoy::Network::FilterStatus::Continue; +} + +void ServerConnection::initializeReadFilterCallbacks( + Envoy::Network::ReadFilterCallbacks &) {} + +Envoy::Http::StreamDecoder &ServerConnection::newStream( + Envoy::Http::StreamEncoder &stream_encoder, bool) { + ServerStreamImpl *raw = nullptr; + uint32_t id = 0U; + + { + std::lock_guard guard(streams_lock_); + + id = stream_counter_++; + auto stream = std::make_unique( + id, *this, request_callback_, stream_encoder); + raw = stream.get(); + streams_[id] = std::move(stream); + } + + ENVOY_LOG(debug, "ServerConnection({}:{}) received new Stream({}:{}:{})", + name_, id_, name_, id_, id); + + return *raw; +} + +void ServerConnection::removeStream(uint32_t stream_id) { + unsigned long size = 0UL; + + { + std::lock_guard guard(streams_lock_); + streams_.erase(stream_id); + size = streams_.size(); + } + + if (0 == size) { + // TODO do anything special here? + ENVOY_LOG(debug, "ServerConnection({}:{}) is idle", name_, id_); + } +} + +void ServerConnection::onEvent(Envoy::Network::ConnectionEvent event) { + switch (event) { + case Envoy::Network::ConnectionEvent::RemoteClose: + ENVOY_LOG(debug, "ServerConnection({}:{}) closed by peer or reset", name_, + id_); + close_callback_(*this, ServerCloseReason::REMOTE_CLOSE); + return; + case Envoy::Network::ConnectionEvent::LocalClose: + ENVOY_LOG(debug, "ServerConnection({}:{}) closed locally", name_, id_); + close_callback_(*this, ServerCloseReason::LOCAL_CLOSE); + return; + default: + ENVOY_LOG(error, "ServerConnection({}:{}) got unknown event", name_, id_); + } +} + +void ServerConnection::onAboveWriteBufferHighWatermark() { + ENVOY_LOG(debug, "ServerConnection({}:{}) above write buffer high watermark", + name_, id_); + // TODO - is this the right way to handle? 
+ http_connection_->onUnderlyingConnectionAboveWriteBufferHighWatermark(); +} + +void ServerConnection::onBelowWriteBufferLowWatermark() { + ENVOY_LOG(debug, "ServerConnection({}:{}) below write buffer low watermark", + name_, id_); + // TODO - is this the right way to handle? + http_connection_->onUnderlyingConnectionBelowWriteBufferLowWatermark(); +} + +void ServerConnection::onGoAway() { + ENVOY_LOG(warn, "ServerConnection({}) got go away", name_); + // TODO how should this be handled? I've never seen it fire. +} + +ServerFilterChain::ServerFilterChain( + Envoy::Network::TransportSocketFactory &transport_socket_factory) + : transport_socket_factory_(transport_socket_factory) {} + +ServerFilterChain::~ServerFilterChain() {} + +const Envoy::Network::TransportSocketFactory & +ServerFilterChain::transportSocketFactory() const { + return transport_socket_factory_; +} + +const std::vector + &ServerFilterChain::networkFilterFactories() const { + return network_filter_factories_; +} + +LocalListenSocket::LocalListenSocket( + Envoy::Network::Address::IpVersion ip_version, uint16_t port, + const Envoy::Network::Socket::OptionsSharedPtr &options, bool bind_to_port) + : NetworkListenSocket( + Envoy::Network::Utility::parseInternetAddress( + Envoy::Network::Test::getAnyAddressUrlString(ip_version), port), + options, bind_to_port) {} + +LocalListenSocket::~LocalListenSocket() {} + +ServerCallbackHelper::ServerCallbackHelper( + ServerRequestCallback request_callback, + ServerAcceptCallback accept_callback, ServerCloseCallback close_callback) + : accept_callback_(accept_callback), + request_callback_(request_callback), + close_callback_(close_callback) { + if (request_callback) { + request_callback_ = [this, &request_callback]( + ServerConnection &connection, ServerStream &stream, + Envoy::Http::HeaderMapPtr request_headers) { + ++requests_received_; + request_callback(connection, stream, std::move(request_headers)); + }; + } else { + request_callback_ = [this](ServerConnection 
&, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ++requests_received_; + Envoy::Http::TestHeaderMapImpl response{{":status", "200"}}; + stream.sendResponseHeaders(response); + }; + } + + if (accept_callback) { + accept_callback_ = + [this, &accept_callback]( + ServerConnection &connection) -> ServerCallbackResult { + ++accepts_; + return accept_callback(connection); + }; + } else { + accept_callback_ = [this](ServerConnection &) -> ServerCallbackResult { + ++accepts_; + return ServerCallbackResult::CONTINUE; + }; + } + + if (close_callback) { + close_callback_ = [this, &close_callback](ServerConnection &connection, + ServerCloseReason reason) { + absl::MutexLock lock(&mutex_); + + switch (reason) { + case ServerCloseReason::REMOTE_CLOSE: + ++remote_closes_; + break; + case ServerCloseReason::LOCAL_CLOSE: + ++local_closes_; + break; + } + + close_callback(connection, reason); + }; + } else { + close_callback_ = [this](ServerConnection &, ServerCloseReason reason) { + absl::MutexLock lock(&mutex_); + + switch (reason) { + case ServerCloseReason::REMOTE_CLOSE: + ++remote_closes_; + break; + case ServerCloseReason::LOCAL_CLOSE: + ++local_closes_; + break; + } + }; + } +} + +ServerCallbackHelper::~ServerCallbackHelper() {} + +uint32_t ServerCallbackHelper::connectionsAccepted() const { return accepts_; } + +uint32_t ServerCallbackHelper::requestsReceived() const { + return requests_received_; +} + +uint32_t ServerCallbackHelper::localCloses() const { + absl::MutexLock lock(&mutex_); + return local_closes_; +} + +uint32_t ServerCallbackHelper::remoteCloses() const { + absl::MutexLock lock(&mutex_); + return remote_closes_; +} + +ServerAcceptCallback ServerCallbackHelper::acceptCallback() const { + return accept_callback_; +} + +ServerRequestCallback ServerCallbackHelper::requestCallback() const { + return request_callback_; +} + +ServerCloseCallback ServerCallbackHelper::closeCallback() const { + return close_callback_; +} + +void 
ServerCallbackHelper::wait(uint32_t connections_closed) { + auto constraints = [connections_closed, this]() { + return connections_closed <= local_closes_ + remote_closes_; + }; + + absl::MutexLock lock(&mutex_); + mutex_.Await(absl::Condition(&constraints)); +} + +void ServerCallbackHelper::wait() { + auto constraints = [this]() { + return accepts_ <= local_closes_ + remote_closes_; + }; + + absl::MutexLock lock(&mutex_); + mutex_.Await(absl::Condition(&constraints)); +} + +Server::Server(const std::string &name, + Envoy::Network::Socket &listening_socket, + Envoy::Network::TransportSocketFactory &transport_socket_factory, + Envoy::Http::CodecClient::Type http_type) + : name_(name), + stats_(), + time_system_(), + api_(Envoy::Thread::ThreadFactorySingleton::get(), stats_, time_system_), + dispatcher_(api_.allocateDispatcher()), + connection_handler_(new Envoy::Server::ConnectionHandlerImpl( + ENVOY_LOGGER(), *dispatcher_)), + thread_(nullptr), + listening_socket_(listening_socket), + server_filter_chain_(transport_socket_factory), + http_type_(http_type) {} + +Server::~Server() { stop(); } + +void Server::start(ServerAcceptCallback accept_callback, + ServerRequestCallback request_callback, + ServerCloseCallback close_callback) { + accept_callback_ = accept_callback; + request_callback_ = request_callback; + close_callback_ = close_callback; + std::promise promise; + + thread_ = api_.threadFactory().createThread([this, &promise]() { + is_running = true; + ENVOY_LOG(debug, "Server({}) started", name_.c_str()); + connection_handler_->addListener(*this); + + promise.set_value(true); // do not use promise again after this + while (is_running) { + dispatcher_->run(Envoy::Event::Dispatcher::RunType::NonBlock); + } + + ENVOY_LOG(debug, "Server({}) stopped", name_.c_str()); + + connection_handler_.reset(); + }); + + promise.get_future().get(); +} + +void Server::start(ServerCallbackHelper &helper) { + start(helper.acceptCallback(), helper.requestCallback(), + 
helper.closeCallback()); +} + +void Server::stop() { + is_running = false; + + if (thread_) { + thread_->join(); + thread_ = nullptr; + } +} + +void Server::stopAcceptingConnections() { + ENVOY_LOG(debug, "Server({}) stopped accepting connections", name_); + connection_handler_->disableListeners(); +} + +void Server::startAcceptingConnections() { + ENVOY_LOG(debug, "Server({}) started accepting connections", name_); + connection_handler_->enableListeners(); +} + +const Envoy::Stats::Store &Server::statsStore() const { return stats_; } + +void Server::setPerConnectionBufferLimitBytes(uint32_t limit) { + connection_buffer_limit_bytes_ = limit; +} + +// +// Envoy::Network::ListenerConfig +// + +Envoy::Network::FilterChainManager &Server::filterChainManager() { + return *this; +} + +Envoy::Network::FilterChainFactory &Server::filterChainFactory() { + return *this; +} + +Envoy::Network::Socket &Server::socket() { return listening_socket_; } + +const Envoy::Network::Socket &Server::socket() const { + return listening_socket_; +} + +bool Server::bindToPort() { return true; } + +bool Server::handOffRestoredDestinationConnections() const { return false; } + +uint32_t Server::perConnectionBufferLimitBytes() const { + return connection_buffer_limit_bytes_; +} + +std::chrono::milliseconds Server::listenerFiltersTimeout() const { + return std::chrono::milliseconds(0); +} + +Envoy::Stats::Scope &Server::listenerScope() { return stats_; } + +uint64_t Server::listenerTag() const { return 0; } + +const std::string &Server::name() const { return name_; } + +const Envoy::Network::FilterChain *Server::findFilterChain( + const Envoy::Network::ConnectionSocket &) const { + return &server_filter_chain_; +} + +bool Server::createNetworkFilterChain( + Envoy::Network::Connection &network_connection, + const std::vector &) { + uint32_t id = connection_counter_++; + ENVOY_LOG(debug, "Server({}) accepted new Connection({}:{})", name_, name_, + id); + + ServerConnectionSharedPtr connection = 
std::make_shared( + name_, id, request_callback_, close_callback_, network_connection, + *dispatcher_, http_type_, stats_); + network_connection.addReadFilter(connection); + network_connection.addConnectionCallbacks(*connection); + + if (ServerCallbackResult::CLOSE == accept_callback_(*connection)) { + // Envoy will close the connection immediately, which will in turn + // trigger the user supplied close callback. + return false; + } + + return true; +} + +bool Server::createListenerFilterChain( + Envoy::Network::ListenerFilterManager &) { + return true; +} + +ClusterHelper::ClusterHelper( + std::initializer_list server_callbacks) { + for (auto it = server_callbacks.begin(); it != server_callbacks.end(); ++it) { + server_callback_helpers_.emplace_back(*it); + } +} + +ClusterHelper::~ClusterHelper() {} + +const std::vector &ClusterHelper::servers() const { + return server_callback_helpers_; +} + +std::vector &ClusterHelper::servers() { + return server_callback_helpers_; +} + +uint32_t ClusterHelper::connectionsAccepted() const { + uint32_t total = 0U; + + for (size_t i = 0; i < server_callback_helpers_.size(); ++i) { + total += server_callback_helpers_[i]->connectionsAccepted(); + } + + return total; +} + +uint32_t ClusterHelper::requestsReceived() const { + uint32_t total = 0U; + + for (size_t i = 0; i < server_callback_helpers_.size(); ++i) { + total += server_callback_helpers_[i]->requestsReceived(); + } + + return total; +} + +uint32_t ClusterHelper::localCloses() const { + uint32_t total = 0U; + + for (size_t i = 0; i < server_callback_helpers_.size(); ++i) { + total += server_callback_helpers_[i]->localCloses(); + } + + return total; +} + +uint32_t ClusterHelper::remoteCloses() const { + uint32_t total = 0U; + + for (size_t i = 0; i < server_callback_helpers_.size(); ++i) { + total += server_callback_helpers_[i]->remoteCloses(); + } + + return total; +} + +void ClusterHelper::wait() { + for (size_t i = 0; i < server_callback_helpers_.size(); ++i) { + 
server_callback_helpers_[i]->wait(); + } +} + +} // namespace Integration +} // namespace Mixer diff --git a/test/integration/int_server.h b/test/integration/int_server.h new file mode 100644 index 00000000000..2e2789b84c2 --- /dev/null +++ b/test/integration/int_server.h @@ -0,0 +1,448 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include "common/api/api_impl.h" +#include "common/grpc/common.h" +#include "common/http/codec_client.h" +#include "common/network/listen_socket_impl.h" +#include "common/stats/isolated_store_impl.h" +#include "test/test_common/test_time.h" +#include "test/test_common/utility.h" + +namespace Mixer { +namespace Integration { + +enum class ServerCloseReason { + REMOTE_CLOSE, // Peer closed or connection was reset after it was + // established. + LOCAL_CLOSE // This process decided to close the connection. +}; + +enum class ServerCallbackResult { + CONTINUE, // Leave the connection open + CLOSE // Close the connection. +}; + +class ServerStream { + public: + ServerStream(); + + virtual ~ServerStream(); + + ServerStream(ServerStream &&) = default; + ServerStream &operator=(ServerStream &&) = default; + + /** + * Send a HTTP header-only response and close the stream. + * + * @param response_headers the response headers + * @param delay delay in msec before sending the response. 
if 0 send + * immediately + */ + virtual void sendResponseHeaders( + const Envoy::Http::HeaderMap &response_headers, + const std::chrono::milliseconds delay = + std::chrono::milliseconds(0)) PURE; + + /** + * Send a gRPC response and close the stream + * + * @param status The gRPC status (carried in the HTTP response trailer) + * @param response The gRPC response (carried in the HTTP response body) + * @param delay delay in msec before sending the response. if 0 send + * immediately + */ + virtual void sendGrpcResponse(Envoy::Grpc::Status::GrpcStatus status, + const Envoy::Protobuf::Message &response, + const std::chrono::milliseconds delay = + std::chrono::milliseconds(0)) PURE; + + private: + ServerStream(const ServerStream &) = delete; + void operator=(const ServerStream &) = delete; +}; + +typedef std::unique_ptr ServerStreamPtr; +typedef std::shared_ptr ServerStreamSharedPtr; + +class ServerConnection; + +// NB: references passed to any of these callbacks are owned by the caller and +// must not be used after the callback returns -- except for the request headers +// which may be moved into the caller. 
+typedef std::function + ServerAcceptCallback; +typedef std::function + ServerCloseCallback; +// TODO support sending delayed responses +typedef std::function + ServerRequestCallback; + +class ServerConnection : public Envoy::Network::ReadFilter, + public Envoy::Network::ConnectionCallbacks, + public Envoy::Http::ServerConnectionCallbacks, + Envoy::Logger::Loggable { + public: + ServerConnection(const std::string &name, uint32_t id, + ServerRequestCallback request_callback, + ServerCloseCallback close_callback, + Envoy::Network::Connection &network_connection, + Envoy::Event::Dispatcher &dispatcher, + Envoy::Http::CodecClient::Type http_type, + Envoy::Stats::Scope &scope); + + virtual ~ServerConnection(); + + ServerConnection(ServerConnection &&) = default; + ServerConnection &operator=(ServerConnection &&) = default; + + const std::string &name() const; + + uint32_t id() const; + + Envoy::Network::Connection &networkConnection(); + const Envoy::Network::Connection &networkConnection() const; + + Envoy::Http::ServerConnection &httpConnection(); + const Envoy::Http::ServerConnection &httpConnection() const; + + Envoy::Event::Dispatcher &dispatcher(); + + /** + * For internal use + */ + void removeStream(uint32_t stream_id); + + // + // Envoy::Network::ReadFilter + // + + virtual Envoy::Network::FilterStatus onData(Envoy::Buffer::Instance &data, + bool end_stream) override; + + virtual Envoy::Network::FilterStatus onNewConnection() override; + + virtual void initializeReadFilterCallbacks( + Envoy::Network::ReadFilterCallbacks &) override; + + // + // Envoy::Http::ConnectionCallbacks + // + + virtual void onGoAway() override; + + // + // Envoy::Http::ServerConnectionCallbacks + // + + virtual Envoy::Http::StreamDecoder &newStream( + Envoy::Http::StreamEncoder &stream_encoder, + bool is_internally_created = false) override; + + // + // Envoy::Network::ConnectionCallbacks + // + + virtual void onEvent(Envoy::Network::ConnectionEvent event) override; + + virtual void 
 onAboveWriteBufferHighWatermark() override; + + virtual void onBelowWriteBufferLowWatermark() override; + + private: + ServerConnection(const ServerConnection &) = delete; + ServerConnection &operator=(const ServerConnection &) = delete; + + std::string name_; + uint32_t id_; + Envoy::Network::Connection &network_connection_; + Envoy::Http::ServerConnectionPtr http_connection_; + Envoy::Event::Dispatcher &dispatcher_; + ServerRequestCallback request_callback_; + ServerCloseCallback close_callback_; + + std::mutex streams_lock_; + std::unordered_map streams_; + uint32_t stream_counter_{0U}; +}; + +typedef std::unique_ptr ServerConnectionPtr; +typedef std::shared_ptr ServerConnectionSharedPtr; + +class ServerFilterChain : public Envoy::Network::FilterChain { + public: + ServerFilterChain( + Envoy::Network::TransportSocketFactory &transport_socket_factory); + + virtual ~ServerFilterChain(); + + ServerFilterChain(ServerFilterChain &&) = default; + ServerFilterChain &operator=(ServerFilterChain &&) = default; + + // + // Envoy::Network::FilterChain + // + + virtual const Envoy::Network::TransportSocketFactory &transportSocketFactory() + const override; + + virtual const std::vector + &networkFilterFactories() const override; + + private: + ServerFilterChain(const ServerFilterChain &) = delete; + ServerFilterChain &operator=(const ServerFilterChain &) = delete; + + Envoy::Network::TransportSocketFactory &transport_socket_factory_; + std::vector network_filter_factories_; +}; + +/** + * A convenience class for creating a listening socket bound to localhost + */ +class LocalListenSocket : public Envoy::Network::TcpListenSocket { + public: + /** + * Create a listening socket bound to localhost. + * + * @param ip_version v4 or v6. v4 by default. + * @param port the port. If 0, let the kernel allocate an available ephemeral + * port. 0 by default. + * @param options socket options. 
nullptr by default + * @param bind_to_port if true immediately bind to the port, allocating one if + * necessary. true by default. + */ + LocalListenSocket( + Envoy::Network::Address::IpVersion ip_version = + Envoy::Network::Address::IpVersion::v4, + uint16_t port = 0, + const Envoy::Network::Socket::OptionsSharedPtr &options = nullptr, + bool bind_to_port = true); + + virtual ~LocalListenSocket(); + + LocalListenSocket(LocalListenSocket &&) = default; + LocalListenSocket &operator=(LocalListenSocket &&) = default; + + private: + LocalListenSocket(const LocalListenSocket &) = delete; + void operator=(const LocalListenSocket &) = delete; +}; + +/** + * A convenience class for passing callbacks to a Server. If no callbacks are + * provided, default callbacks that track some simple metrics will be used. If + * callbacks are provided, they will be wrapped with callbacks that maintain the + * same simple set of metrics. + */ +class ServerCallbackHelper { + public: + ServerCallbackHelper(ServerRequestCallback request_callback = nullptr, + ServerAcceptCallback accept_callback = nullptr, + ServerCloseCallback close_callback = nullptr); + + virtual ~ServerCallbackHelper(); + + ServerCallbackHelper(ServerCallbackHelper &&) = default; + ServerCallbackHelper &operator=(ServerCallbackHelper &&) = default; + + uint32_t connectionsAccepted() const; + uint32_t requestsReceived() const; + uint32_t localCloses() const; + uint32_t remoteCloses() const; + ServerAcceptCallback acceptCallback() const; + ServerRequestCallback requestCallback() const; + ServerCloseCallback closeCallback() const; + + /* + * Wait until the server has accepted n connections and seen them closed (due + * to error or client close) + */ + void wait(uint32_t connections); + + /* + * Wait until the server has seen a close for every connection it has + * accepted. 
+ */ + void wait(); + + private: + ServerCallbackHelper(const ServerCallbackHelper &) = delete; + void operator=(const ServerCallbackHelper &) = delete; + + ServerAcceptCallback accept_callback_; + ServerRequestCallback request_callback_; + ServerCloseCallback close_callback_; + + std::atomic accepts_{0}; + std::atomic requests_received_{0}; + uint32_t local_closes_{0}; + uint32_t remote_closes_{0}; + mutable absl::Mutex mutex_; +}; + +typedef std::unique_ptr ServerCallbackHelperPtr; +typedef std::shared_ptr ServerCallbackHelperSharedPtr; + +class Server : public Envoy::Network::FilterChainManager, + public Envoy::Network::FilterChainFactory, + public Envoy::Network::ListenerConfig, + Envoy::Logger::Loggable { + public: + // TODO make use of Network::Socket::OptionsSharedPtr + Server(const std::string &name, Envoy::Network::Socket &listening_socket, + Envoy::Network::TransportSocketFactory &transport_socket_factory, + Envoy::Http::CodecClient::Type http_type); + + virtual ~Server(); + + Server(Server &&) = default; + Server &operator=(Server &&) = default; + + void start(ServerAcceptCallback accept_callback, + ServerRequestCallback request_callback, + ServerCloseCallback close_callback); + + void start(ServerCallbackHelper &helper); + + void stop(); + + void stopAcceptingConnections(); + + void startAcceptingConnections(); + + const Envoy::Stats::Store &statsStore() const; + + // TODO does this affect socket recv buffer size? Only for new connections? 
+ void setPerConnectionBufferLimitBytes(uint32_t limit); + + // + // Envoy::Network::ListenerConfig + // + + virtual Envoy::Network::FilterChainManager &filterChainManager() override; + + virtual Envoy::Network::FilterChainFactory &filterChainFactory() override; + + virtual Envoy::Network::Socket &socket() override; + + virtual const Envoy::Network::Socket &socket() const override; + + virtual bool bindToPort() override; + + virtual bool handOffRestoredDestinationConnections() const override; + + // TODO does this affect socket recv buffer size? Only for new connections? + virtual uint32_t perConnectionBufferLimitBytes() const override; + + virtual std::chrono::milliseconds listenerFiltersTimeout() const override; + + virtual Envoy::Stats::Scope &listenerScope() override; + + virtual uint64_t listenerTag() const override; + + virtual const std::string &name() const override; + + // + // Envoy::Network::FilterChainManager + // + + virtual const Envoy::Network::FilterChain *findFilterChain( + const Envoy::Network::ConnectionSocket &) const override; + + // + // Envoy::Network::FilterChainFactory + // + + virtual bool createNetworkFilterChain( + Envoy::Network::Connection &network_connection, + const std::vector &) override; + + virtual bool createListenerFilterChain( + Envoy::Network::ListenerFilterManager &) override; + + private: + Server(const Server &) = delete; + void operator=(const Server &) = delete; + + std::string name_; + Envoy::Stats::IsolatedStoreImpl stats_; + Envoy::Event::TestRealTimeSystem time_system_; + Envoy::Api::Impl api_; + Envoy::Event::DispatcherPtr dispatcher_; + Envoy::Network::ConnectionHandlerPtr connection_handler_; + Envoy::Thread::ThreadPtr thread_; + std::atomic is_running{false}; + + ServerAcceptCallback accept_callback_{nullptr}; + ServerRequestCallback request_callback_{nullptr}; + ServerCloseCallback close_callback_{nullptr}; + + // + // Envoy::Network::ListenerConfig + // + + Envoy::Network::Socket &listening_socket_; + 
std::atomic connection_buffer_limit_bytes_{0U}; + + // + // Envoy::Network::FilterChainManager + // + + ServerFilterChain server_filter_chain_; + + // + // Envoy::Network::FilterChainFactory + // + + Envoy::Http::CodecClient::Type http_type_; + std::atomic connection_counter_{0U}; +}; + +typedef std::unique_ptr ServerPtr; +typedef std::shared_ptr ServerSharedPtr; + +class ClusterHelper { + public: + /*template + ClusterHelper(Args &&... args) : servers_(std::forward(args)...){};*/ + + ClusterHelper(std::initializer_list server_callbacks); + + virtual ~ClusterHelper(); + + const std::vector &servers() const; + std::vector &servers(); + + uint32_t connectionsAccepted() const; + uint32_t requestsReceived() const; + uint32_t localCloses() const; + uint32_t remoteCloses() const; + + void wait(); + + private: + ClusterHelper(const ClusterHelper &) = delete; + void operator=(const ClusterHelper &) = delete; + + std::vector server_callback_helpers_; +}; + +} // namespace Integration +} // namespace Mixer diff --git a/test/integration/mixer_fault_test.cc b/test/integration/mixer_fault_test.cc new file mode 100644 index 00000000000..8757a8bf477 --- /dev/null +++ b/test/integration/mixer_fault_test.cc @@ -0,0 +1,1348 @@ +/* Copyright 2019 Istio Authors. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include + +#include "absl/strings/match.h" +#include "gtest/gtest.h" +#include "include/istio/mixerclient/options.h" +#include "int_client.h" +#include "int_server.h" +#include "mixer/v1/mixer.pb.h" +#include "test/integration/http_integration.h" +#include "test/test_common/network_utility.h" + +#define EXPECT_IN_RANGE(val, min, max) \ + EXPECT_LE(val, max); \ + EXPECT_GE(val, min) + +namespace Mixer { +namespace Integration { + +enum class NetworkFailPolicy { FAIL_OPEN = 0, FAIL_CLOSED = 1 }; + +inline static int networkFailPolicyToInt(NetworkFailPolicy policy) { + switch (policy) { + case NetworkFailPolicy::FAIL_OPEN: + return 0; + default: + return 1; + } +} + +class MixerFaultTest : public Envoy::HttpIntegrationTest, public testing::Test { + public: + MixerFaultTest() + : HttpIntegrationTest( + Envoy::Http::CodecClient::Type::HTTP1, + Envoy::Network::Address::IpVersion::v4, + std::make_unique()), + transport_socket_factory_(), + client_("client") { + Envoy::Http::CodecClient::Type origin_protocol = + Envoy::Http::CodecClient::Type::HTTP2; + setUpstreamProtocol(Envoy::Http::CodecClient::Type::HTTP2 == origin_protocol + ? Envoy::FakeHttpConnection::Type::HTTP2 + : Envoy::FakeHttpConnection::Type::HTTP1); + + // Tell the base class that we will create our own upstream origin server. + fake_upstreams_count_ = 0; + + origin_listeners_.emplace_back(new LocalListenSocket()); + origin_servers_.emplace_back( + new Server(fmt::sprintf("origin-0"), *origin_listeners_.back(), + transport_socket_factory_, origin_protocol)); + } + + virtual ~MixerFaultTest() {} + + // TODO modify BaseIntegrationTest in Envoy to eliminate this copy of the + // createEnvoy function. 
+ virtual void createEnvoy() override { + std::vector ports; + + // TODO modify BaseIntegrationTest to add additional ports without having to + // make them fake upstreams + addPorts(ports); + + config_helper_.finalize(ports); + + // TODO modify BaseIntegrationTest use protected inheritance for + // Envoy::Logger::Loggable so tests can use ENVOY_LOG fprintf(stderr, + // "Running Envoy with configuration:\n%s", + // config_helper_.bootstrap().DebugString().c_str()); + + const std::string bootstrap_path = + Envoy::TestEnvironment::writeStringToFileForTest( + "bootstrap.json", Envoy::MessageUtil::getJsonStringFromMessage( + config_helper_.bootstrap())); + + std::vector named_ports; + const auto &static_resources = + config_helper_.bootstrap().static_resources(); + for (int i = 0; i < static_resources.listeners_size(); ++i) { + named_ports.push_back(static_resources.listeners(i).name()); + } + createGeneratedApiTestServer(bootstrap_path, named_ports); + } + + // Must be called before Envoy is stopped + void extractCounters(const std::string &prefix, + std::unordered_map &counters) { + for (auto counter : test_server_->stat_store().counters()) { + if (!absl::StartsWith(counter->name(), prefix)) { + continue; + } + + counters[counter->name()] = counter->value(); + } + } + + void dumpCounters(const std::unordered_map &counters) { + for (auto it : counters) { + std::cerr << it.first << " = " << it.second << std::endl; + } + } + + protected: + LoadGeneratorPtr startServers(NetworkFailPolicy fail_policy, + ServerCallbackHelper &origin_callbacks, + ClusterHelper &policy_cluster, + ClusterHelper &telemetry_cluster, + uint32_t retries = 0, + uint32_t base_retry_ms = 10, + uint32_t max_retry_ms = 100) { + for (size_t i = 0; i < origin_servers_.size(); ++i) { + origin_servers_[i]->start(origin_callbacks); + } + + for (size_t i = 0; i < policy_cluster.servers().size(); ++i) { + policy_listeners_.emplace_back(new LocalListenSocket()); + policy_servers_.emplace_back(new Server( + 
fmt::sprintf("policy-%d", i), *policy_listeners_.back(), + transport_socket_factory_, Envoy::Http::CodecClient::Type::HTTP2)); + policy_servers_.back()->start(*policy_cluster.servers()[i]); + } + + for (size_t i = 0; i < telemetry_cluster.servers().size(); ++i) { + telemetry_listeners_.emplace_back(new LocalListenSocket()); + telemetry_servers_.emplace_back(new Server( + fmt::sprintf("telemetry-%d", i), *telemetry_listeners_.back(), + transport_socket_factory_, Envoy::Http::CodecClient::Type::HTTP2)); + telemetry_servers_.back()->start(*telemetry_cluster.servers()[i]); + } + + std::string telemetry_name("telemetry-backend"); + std::string policy_name("policy-backend"); + + addNodeMetadata(); + configureMixerFilter(fail_policy, policy_name, telemetry_name, retries, + base_retry_ms, max_retry_ms); + addCluster(telemetry_name, telemetry_listeners_); + addCluster(policy_name, policy_listeners_); + + // This calls createEnvoy() (see below) and then starts envoy + HttpIntegrationTest::initialize(); + + auto addr = Envoy::Network::Utility::parseInternetAddress( + "127.0.0.1", static_cast(lookupPort("http"))); + return std::make_unique(client_, transport_socket_factory_, + HttpVersion::HTTP1, addr); + } + + private: + void addPorts(std::vector &ports) { + // origin must come first. The order of the rest depends on the order their + // cluster was added to the config. 
+    for (size_t i = 0; i < origin_listeners_.size(); ++i) {
+      ports.push_back(origin_listeners_[i]->localAddress()->ip()->port());
+    }
+
+    for (size_t i = 0; i < telemetry_listeners_.size(); ++i) {
+      ports.push_back(telemetry_listeners_[i]->localAddress()->ip()->port());
+    }
+
+    for (size_t i = 0; i < policy_listeners_.size(); ++i) {
+      ports.push_back(policy_listeners_[i]->localAddress()->ip()->port());
+    }
+  }
+
+  // Merge Istio-style node metadata into the bootstrap node so the mixer
+  // filter can forward it as attributes.
+  void addNodeMetadata() {
+    config_helper_.addConfigModifier(
+        [](envoy::config::bootstrap::v2::Bootstrap &bootstrap) {
+          ::google::protobuf::Struct meta;
+
+          Envoy::MessageUtil::loadFromJson(R"({
+        "ISTIO_VERSION": "1.0.1",
+        "NODE_UID": "pod",
+        "NODE_NAMESPACE": "kubernetes://dest.pod"
+      })",
+                                           meta);
+
+          bootstrap.mutable_node()->mutable_metadata()->MergeFrom(meta);
+        });
+  }
+
+  // Install the mixer HTTP filter, wiring its check/report traffic to the
+  // policy and telemetry clusters and configuring the retry/backoff policy.
+  void configureMixerFilter(NetworkFailPolicy fail_policy,
+                            const std::string &policy_name,
+                            const std::string &telemetry_name, uint32_t retries,
+                            uint32_t base_retry_ms, uint32_t max_retry_ms) {
+    const uint32_t base_retry_sec = base_retry_ms / 1000;
+    // BUG FIX: the fractional part must come from the original millisecond
+    // value, not from the already-truncated seconds value.  The old
+    // expression (base_retry_sec % 1000 * 1'000'000) evaluated to 0 for any
+    // sub-second wait, collapsing the retry backoff to zero.
+    const uint32_t base_retry_nanos = base_retry_ms % 1000 * 1'000'000;
+    const uint32_t max_retry_sec = max_retry_ms / 1000;
+    const uint32_t max_retry_nanos = max_retry_ms % 1000 * 1'000'000;
+    constexpr char sourceUID[] = "kubernetes://src.pod";
+
+    std::string mixer_conf{fmt::sprintf(
+        R"EOF(
+  name: mixer
+  config:
+    defaultDestinationService: "default"
+    mixerAttributes:
+      attributes: {}
+    serviceConfigs: {
+      "default": {}
+    }
+    transport:
+      attributes_for_mixer_proxy:
+        attributes: {
+          "source.uid": {
+            string_value: %s
+          }
+        }
+      network_fail_policy: {
+        policy: %d,
+        max_retry: %u,
+        base_retry_wait: {
+          seconds: %u,
+          nanos: %u
+        },
+        max_retry_wait: {
+          seconds: %u,
+          nanos: %u
+        }
+      }
+      stats_update_interval: {
+        seconds: %u,
+        nanos: %u
+      }
+      report_cluster: %s
+      check_cluster: %s
+      )EOF",
+        sourceUID, networkFailPolicyToInt(fail_policy), retries, base_retry_sec,
+        base_retry_nanos, max_retry_sec, max_retry_nanos,
0U, 1'000'000, + telemetry_name.c_str(), policy_name.c_str())}; + config_helper_.addFilter(mixer_conf); + } + + void addCluster( + const std::string &name, + const std::vector &listeners) { + constexpr uint32_t max_uint32 = + 2147483647U; // protobuf max, not language max + + // See + // https://www.envoyproxy.io/docs/envoy/latest/api-v2/api/v2/cds.proto#cluster + + // TODO something in the base class clobbers the connection timeout here + std::string cluster_conf{fmt::sprintf(R"EOF( + name: %s + type: STATIC + lb_policy: ROUND_ROBIN + http2_protocol_options: { + max_concurrent_streams: %u + } + connect_timeout: 1s + max_requests_per_connection: %u + hosts: + )EOF", + name.c_str(), max_uint32, + max_uint32)}; + + for (size_t i = 0; i < listeners.size(); ++i) { + cluster_conf.append({fmt::sprintf( + R"EOF( + - socket_address: + address: %s + port_value: %d + )EOF", + Envoy::Network::Test::getLoopbackAddressString(version_), + listeners[i]->localAddress()->ip()->port())}); + } + + config_helper_.addConfigModifier( + [cluster_conf](envoy::config::bootstrap::v2::Bootstrap &bootstrap) { + bootstrap.mutable_static_resources()->add_clusters()->CopyFrom( + Envoy::TestUtility::parseYaml( + cluster_conf)); + }); + } + + Envoy::Network::RawBufferSocketFactory transport_socket_factory_; + Client client_; + std::vector origin_listeners_; + std::vector policy_listeners_; + std::vector telemetry_listeners_; + // These three vectors could store Server directly if + // Envoy::Stats::IsolatedStoreImpl was made movable. 
+ std::vector origin_servers_; + std::vector policy_servers_; + std::vector telemetry_servers_; + Envoy::Network::Address::InstanceConstSharedPtr + envoy_address_; // at most 1 envoy +}; + +TEST_F(MixerFaultTest, HappyPath) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + // Send a gRPC success response immediately to every policy check + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + ClusterHelper telemetry_cluster( + {new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + // Send a gRPC success response immediately to every telemetry report. + ::istio::mixer::v1::ReportResponse response; + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + LoadGeneratorPtr client = startServers(fail_policy, origin_callbacks, + policy_cluster, telemetry_cluster); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + client->run(connections_to_initiate, requests_to_send, std::move(request)); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. 
+ // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + + // All client connections are successfully established. + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. + EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 2xx class + EXPECT_EQ(client->class2xxResponses(), requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. + EXPECT_EQ(client->remoteCloses(), 0); + + // assert that the origin request callback is called for every client request + // sent + EXPECT_EQ(origin_callbacks.requestsReceived(), requests_to_send); + + // assert that the policy request callback is called for every client request + // sent + EXPECT_EQ(policy_cluster.requestsReceived(), requests_to_send); +} + +TEST_F(MixerFaultTest, FailClosedAndClosePolicySocketAfterAccept) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Setup + // + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {// Policy server immediately closes any connection accepted. 
+ new ServerCallbackHelper( + [](ServerConnection &, ServerStream &, + Envoy::Http::HeaderMapPtr &&) { + GTEST_FATAL_FAILURE_( + "Connections immediately closed so no response should be " + "received"); + }, + [](ServerConnection &) -> ServerCallbackResult { + return ServerCallbackResult::CLOSE; + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry server sends a gRPC success response immediately to every + // telemetry report. + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::ReportResponse response; + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + LoadGeneratorPtr client = startServers(fail_policy, origin_callbacks, + policy_cluster, telemetry_cluster); + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + client->run(connections_to_initiate, requests_to_send, std::move(request)); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. + // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + + // All client connections are successfully established. + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. 
+ EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 5xx class + EXPECT_EQ(client->class2xxResponses(), 0); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), requests_to_send); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. + EXPECT_EQ(client->remoteCloses(), 0); + + // Origin server should see no requests since the mixer filter is configured + // to fail closed. + EXPECT_EQ(origin_callbacks.requestsReceived(), 0); + + // Policy server accept callback is called for every client connection + // initiated. + EXPECT_GE(policy_cluster.connectionsAccepted(), connections_to_initiate); + // Policy server request callback is never called + EXPECT_EQ(policy_cluster.requestsReceived(), 0); + // Policy server closes every connection + EXPECT_EQ(policy_cluster.connectionsAccepted(), policy_cluster.localCloses()); + EXPECT_EQ(policy_cluster.remoteCloses(), 0); +} + +TEST_F(MixerFaultTest, FailClosedAndSendPolicyResponseSlowly) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30 * 30; + constexpr uint32_t requests_to_send = 1 * connections_to_initiate; + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {// Send a gRPC success response after 60 seconds to every policy check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + 
stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(60'000)); + })}); + + ClusterHelper telemetry_cluster( + {// Sends a gRPC success response immediately to every telemetry report. + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::ReportResponse response; + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + LoadGeneratorPtr client = startServers(fail_policy, origin_callbacks, + policy_cluster, telemetry_cluster); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + + client->run(connections_to_initiate, requests_to_send, std::move(request), + std::chrono::milliseconds(10'000)); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. + // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + +#ifndef __APPLE__ + // All connections are successfully established + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. + EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 5xx class + EXPECT_EQ(client->class2xxResponses(), 0); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), requests_to_send); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. 
+ EXPECT_EQ(client->remoteCloses(), 0); + + // Policy server accept callback is called at least once (h2 socket reuse + // means may only be called once) + EXPECT_GE(policy_cluster.connectionsAccepted(), 1); + // Policy server request callback sees every policy check + EXPECT_EQ(requests_to_send, policy_cluster.requestsReceived()); + // Policy server closes every connection + EXPECT_EQ(policy_cluster.connectionsAccepted(), + policy_cluster.localCloses() + policy_cluster.remoteCloses()); +#else + // MacOS is a bit flakier than Linux, so broaden assetion ranges to reduce + // test flakes. + + // Most connections are successfully established. + EXPECT_IN_RANGE(client->connectSuccesses(), 0.8 * connections_to_initiate, + connections_to_initiate); + EXPECT_IN_RANGE(client->connectFailures(), 0, 0.2 * connections_to_initiate); + EXPECT_EQ(client->connectSuccesses() + client->connectFailures(), + connections_to_initiate); + // Client close callback usually called for every client connection. + EXPECT_IN_RANGE(client->localCloses(), 0.8 * connections_to_initiate, + connections_to_initiate); + // Client response callback is usually called for every request sent + EXPECT_IN_RANGE(client->responsesReceived(), 0.8 * requests_to_send, + requests_to_send); + // Most responses are a 5xx class and none are successful + EXPECT_EQ(client->class2xxResponses(), 0); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_IN_RANGE(client->class5xxResponses(), 0.8 * requests_to_send, + requests_to_send); + EXPECT_EQ(client->responseTimeouts(), 0); + // Almost no client sockets are rudely closed by server / almost no client + // sockets are reset. 
+ EXPECT_IN_RANGE(client->remoteCloses(), 0, 0.2 * connections_to_initiate); + + // Policy server accept callback is called at least once (h2 socket reuse + // means may only be called once) + EXPECT_GE(policy_cluster.connectionsAccepted(), 1); + // Policy server request callback sees most policy checks + EXPECT_IN_RANGE(policy_cluster.requestsReceived(), 0.8 * requests_to_send, + requests_to_send); + // Policy server closes every connection + EXPECT_EQ(policy_cluster.connectionsAccepted(), + policy_cluster.localCloses() + policy_cluster.remoteCloses()); +#endif + + // Origin server should see no requests since the mixer filter is + // configured to fail closed. + EXPECT_EQ(origin_callbacks.requestsReceived(), 0); +} + +TEST_F(MixerFaultTest, TolerateTelemetryBlackhole) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + // Over provision the policy cluster to reduce the change it becomes a source + // of error + + ClusterHelper policy_cluster( + {// Send a gRPC success response immediately to every policy check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + }), + // Send a gRPC success response immediately to every policy check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + 
google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + }), + // Send a gRPC success response immediately to every policy check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry receives the telemetry report requests but never sends a + // response. + new ServerCallbackHelper([](ServerConnection &, ServerStream &, + Envoy::Http::HeaderMapPtr &&) { + // eat the request and do nothing + })}); + + LoadGeneratorPtr client = startServers(fail_policy, origin_callbacks, + policy_cluster, telemetry_cluster); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + client->run(connections_to_initiate, requests_to_send, std::move(request), + std::chrono::milliseconds(10'000)); + + std::unordered_map counters; + extractCounters("http_mixer_filter", counters); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. + // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + +#ifndef __APPLE__ + // On Linux every connection will be successfully established. + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. 
+ EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 2xx class + EXPECT_EQ(client->class2xxResponses(), requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. + EXPECT_EQ(client->remoteCloses(), 0); + + // Origin server should see all requests + EXPECT_EQ(origin_callbacks.requestsReceived(), requests_to_send); + + // Policy server request callback sees every policy check + EXPECT_EQ(requests_to_send, policy_cluster.requestsReceived()); +#else + // MacOS is a bit flakier than Linux, so broaden assetion ranges to reduce + // test flakes. + + // Most connections are successfully established. + EXPECT_IN_RANGE(client->connectSuccesses(), 0.8 * connections_to_initiate, + connections_to_initiate); + EXPECT_IN_RANGE(client->connectFailures(), 0, 0.2 * connections_to_initiate); + // Client close callback usually called for every client connection. + EXPECT_IN_RANGE(client->localCloses(), 0.8 * connections_to_initiate, + connections_to_initiate); + // Client response callback is usually called for every request sent + EXPECT_IN_RANGE(client->responsesReceived(), 0.8 * requests_to_send, + requests_to_send); + // Most responses were a 2xx class + EXPECT_IN_RANGE(client->class2xxResponses(), 0.8 * requests_to_send, + requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // Almost no client sockets are rudely closed by server / almost no client + // sockets are reset. 
+  EXPECT_IN_RANGE(client->remoteCloses(), 0, 0.2 * connections_to_initiate);
+
+  // Origin server should see most requests
+  EXPECT_IN_RANGE(origin_callbacks.requestsReceived(), 0.8 * requests_to_send,
+                  requests_to_send);
+
+  // Policy server request callback sees most policy checks
+  EXPECT_IN_RANGE(policy_cluster.requestsReceived(), 0.8 * requests_to_send,
+                  requests_to_send);
+#endif
+
+  // Policy server accept callback is called at least once (h2 socket reuse
+  // means may only be called once)
+  EXPECT_GE(policy_cluster.connectionsAccepted(), 1);
+  // Policy server closes every connection
+  EXPECT_EQ(policy_cluster.connectionsAccepted(),
+            policy_cluster.localCloses() + policy_cluster.remoteCloses());
+
+  // Telemetry server accept callback is called at least once (h2 socket reuse
+  // means may only be called once)
+  EXPECT_GE(telemetry_cluster.connectionsAccepted(), 1);
+
+  // Assertions against the mixer filter's internal counters.
+  // BUG FIX: the remote-report counter names below previously carried a stray
+  // trailing underscore (e.g. "total_remote_report_calls_").  The filter
+  // registers these stats without the underscore (see the nonzero lookups in
+  // RetryOnTransportError), so the underscored lookups default-inserted fresh
+  // zero-valued map entries and these assertions passed vacuously.
+  EXPECT_EQ(counters["http_mixer_filter.total_report_calls"], requests_to_send);
+  EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_report_calls"], 0,
+                  requests_to_send * 0.12);
+  // All remote reports should time out
+  EXPECT_EQ(counters["http_mixer_filter.total_remote_report_timeouts"],
+            counters["http_mixer_filter.total_remote_report_calls"]);
+  EXPECT_EQ(counters["http_mixer_filter.total_remote_report_successes"], 0);
+  EXPECT_EQ(counters["http_mixer_filter.total_remote_report_send_errors"], 0);
+  EXPECT_EQ(counters["http_mixer_filter.total_remote_report_other_errors"], 0);
+}
+
+TEST_F(MixerFaultTest, FailOpenAndSendPolicyResponseSlowly) {
+  Envoy::Logger::Registry::setLogLevel(spdlog::level::err);
+
+  constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_OPEN;
+  constexpr uint32_t connections_to_initiate = 30 * 30;
+  constexpr uint32_t requests_to_send = 1 * connections_to_initiate;
+
+  // Origin server immediately sends a simple 200 OK to every request
+  ServerCallbackHelper origin_callbacks;
+
+ ClusterHelper policy_cluster( + {// Policy server sends a gRPC success response after 60 seconds to every + // policy check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(60'000)); + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry server sends a gRPC success response immediately to every + // telemetry report. + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::ReportResponse response; + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + LoadGeneratorPtr client = startServers(fail_policy, origin_callbacks, + policy_cluster, telemetry_cluster); + + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + + client->run(connections_to_initiate, requests_to_send, std::move(request), + std::chrono::milliseconds(10'000)); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. + // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + +#ifndef __APPLE__ + // All connections are successfully established + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. 
+ EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 2xx class + EXPECT_EQ(client->class2xxResponses(), requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. + EXPECT_EQ(client->remoteCloses(), 0); + + // Origin server should see every requests since the mixer filter is + // configured to fail open. + EXPECT_EQ(origin_callbacks.requestsReceived(), requests_to_send); + + // Policy server accept callback is called at least once (h2 socket reuse + // means may only be called once) + EXPECT_GE(policy_cluster.connectionsAccepted(), 1); + // Policy server request callback sees every policy check + EXPECT_EQ(requests_to_send, policy_cluster.requestsReceived()); + // Policy server closes every connection + EXPECT_EQ(policy_cluster.connectionsAccepted(), + policy_cluster.localCloses() + policy_cluster.remoteCloses()); +#else + // MacOS is a bit flakier than Linux, so broaden assetion ranges to reduce + // test flakes. + + // Most connections are successfully established. + EXPECT_IN_RANGE(client->connectSuccesses(), 0.8 * connections_to_initiate, + connections_to_initiate); + EXPECT_IN_RANGE(client->connectFailures(), 0, 0.2 * connections_to_initiate); + // Client close callback usually called for every client connection. 
+ EXPECT_IN_RANGE(client->localCloses(), 0.8 * connections_to_initiate, + connections_to_initiate); + // Client response callback is usually called for every request sent + EXPECT_IN_RANGE(client->responsesReceived(), 0.8 * requests_to_send, + requests_to_send); + // Most responses were a 2xx class + EXPECT_IN_RANGE(client->class2xxResponses(), 0.8 * requests_to_send, + requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // Almost no client sockets are rudely closed by server / almost no client + // sockets are reset. + EXPECT_IN_RANGE(client->remoteCloses(), 0, 0.2 * connections_to_initiate); + + // Origin server should see most requests since the mixer filter is + // configured to fail open. + EXPECT_IN_RANGE(origin_callbacks.requestsReceived(), 0.8 * requests_to_send, + requests_to_send); + + // Policy server accept callback is called at least once (h2 socket reuse + // means may only be called once) + EXPECT_GE(policy_cluster.connectionsAccepted(), 1); + // Policy server request callback sees most policy checks + EXPECT_IN_RANGE(policy_cluster.requestsReceived(), 0.8 * requests_to_send, + requests_to_send); + // Policy server closes every connection + EXPECT_EQ(policy_cluster.connectionsAccepted(), + policy_cluster.localCloses() + policy_cluster.remoteCloses()); +#endif +} + +TEST_F(MixerFaultTest, RetryOnTransportError) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + uint32_t retries = 10; + uint32_t base_retry_ms = 1; + uint32_t max_retry_ms = 10; + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Setup + // + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {// One policy server 
immediately closes any connection accepted. + new ServerCallbackHelper( + [](ServerConnection &, ServerStream &, + Envoy::Http::HeaderMapPtr &&) { + GTEST_FATAL_FAILURE_( + "Connections immediately closed so no response should be " + "received"); + }, + [](ServerConnection &) -> ServerCallbackResult { + return ServerCallbackResult::CLOSE; + }), + // Two other policy servers immediately send gRPC OK responses + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + }), + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry server sends a gRPC success response immediately to every + // telemetry report. 
+ new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::ReportResponse response; + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + LoadGeneratorPtr client = + startServers(fail_policy, origin_callbacks, policy_cluster, + telemetry_cluster, retries, base_retry_ms, max_retry_ms); + // + // Exec test and wait for it to finish + // + + Envoy::Http::HeaderMapPtr request{ + new Envoy::Http::TestHeaderMapImpl{{":method", "GET"}, + {":path", "/"}, + {":scheme", "http"}, + {":authority", "host"}}}; + client->run(connections_to_initiate, requests_to_send, std::move(request)); + + std::unordered_map counters; + extractCounters("http_mixer_filter", counters); + + // shutdown envoy by destroying it + test_server_ = nullptr; + // wait until the upstreams have closed all connections they accepted. + // shutting down envoy should close them all + origin_callbacks.wait(); + policy_cluster.wait(); + telemetry_cluster.wait(); + + // + // Evaluate test + // + + // All client connections are successfully established. + EXPECT_EQ(client->connectSuccesses(), connections_to_initiate); + EXPECT_EQ(client->connectFailures(), 0); + // Client close callback called for every client connection. + EXPECT_EQ(client->localCloses(), connections_to_initiate); + // Client response callback is called for every request sent + EXPECT_EQ(client->responsesReceived(), requests_to_send); + // Every response was a 2xx class + EXPECT_EQ(client->class2xxResponses(), requests_to_send); + EXPECT_EQ(client->class4xxResponses(), 0); + EXPECT_EQ(client->class5xxResponses(), 0); + EXPECT_EQ(client->responseTimeouts(), 0); + // No client sockets are rudely closed by server / no client sockets are + // reset. 
+ EXPECT_EQ(client->remoteCloses(), 0); + + // assert that the origin request callback is called for every client request + // sent + EXPECT_EQ(origin_callbacks.requestsReceived(), requests_to_send); + + // assert that the policy request callback is called for every client request + // sent + EXPECT_EQ(policy_cluster.requestsReceived(), requests_to_send); + + // Assertions against the mixer filter's internal counters. + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_other_errors"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_retries"], + requests_to_send / 2 - requests_to_send / 10, + requests_to_send / 2 + requests_to_send / 10); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hits"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_cancellations"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_check_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_misses"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_calls"], requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hits"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_successes"], + requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_timeouts"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_send_errors"], + requests_to_send / 2 - requests_to_send / 10, + requests_to_send / 2 + requests_to_send / 10); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_misses"], + requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_quota_calls"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_report_calls"], 0, + counters["http_mixer_filter.total_report_calls"] * 0.12); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_prefetch_calls"], 0); + 
EXPECT_EQ(counters["http_mixer_filter.total_remote_check_calls"], + requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_report_calls"], requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_check_calls"], requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_calls"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_check_accepts"], + requests_to_send); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_denies"], 0); +} + +TEST_F(MixerFaultTest, CancelCheck) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + uint32_t retries = 10; + uint32_t base_retry_ms = 1; + uint32_t max_retry_ms = 10; + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Setup + // + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {// One policy server immediately closes any connection accepted. 
+ new ServerCallbackHelper( + [](ServerConnection &, ServerStream &, + Envoy::Http::HeaderMapPtr &&) { + GTEST_FATAL_FAILURE_( + "Connections immediately closed so no response should be " + "received"); + }, + [](ServerConnection &) -> ServerCallbackResult { + return ServerCallbackResult::CLOSE; + }), + // One policy server is really slow - client will timeout first and + // cancel check + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(60'000)); + }), + // One policy server is nice and zippy + new ServerCallbackHelper([](ServerConnection &, ServerStream &stream, + Envoy::Http::HeaderMapPtr &&) { + ::istio::mixer::v1::CheckResponse response; + response.mutable_precondition()->mutable_status()->set_code( + google::protobuf::util::error::Code::OK); + stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response, + std::chrono::milliseconds(0)); + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry server sends a gRPC success response immediately to every + // telemetry report. 
+ new ServerCallbackHelper([](ServerConnection &, ServerStream &stream,
+ Envoy::Http::HeaderMapPtr &&) {
+ ::istio::mixer::v1::ReportResponse response;
+ stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response,
+ std::chrono::milliseconds(0));
+ })});
+
+ LoadGeneratorPtr client =
+ startServers(fail_policy, origin_callbacks, policy_cluster,
+ telemetry_cluster, retries, base_retry_ms, max_retry_ms);
+ //
+ // Exec test and wait for it to finish
+ //
+
+ Envoy::Http::HeaderMapPtr request{
+ new Envoy::Http::TestHeaderMapImpl{{":method", "GET"},
+ {":path", "/"},
+ {":scheme", "http"},
+ {":authority", "host"}}};
+ client->run(connections_to_initiate, requests_to_send, std::move(request),
+ std::chrono::milliseconds(5'000));
+
+ std::unordered_map<std::string, double> counters;
+ extractCounters("http_mixer_filter", counters);
+
+ // shutdown envoy by destroying it
+ test_server_ = nullptr;
+ // wait until the upstreams have closed all connections they accepted.
+ // shutting down envoy should close them all
+ origin_callbacks.wait();
+ policy_cluster.wait();
+ telemetry_cluster.wait();
+
+ //
+ // Evaluate test
+ //
+
+ // All client connections are successfully established.
+ EXPECT_EQ(client->connectSuccesses(), connections_to_initiate);
+ EXPECT_EQ(client->connectFailures(), 0);
+ // Client close callback called for every client connection.
+ EXPECT_EQ(client->localCloses(), connections_to_initiate);
+ // Not all responses are received due to timeouts
+ EXPECT_LE(client->responsesReceived(), requests_to_send);
+ EXPECT_GE(client->responsesReceived(), 1);
+ // Every response was a 2xx class
+ EXPECT_EQ(client->class2xxResponses(), client->responsesReceived());
+ EXPECT_EQ(client->class4xxResponses(), 0);
+ EXPECT_EQ(client->class5xxResponses(), 0);
+ // Or a timeout. Implementational artifact: timeouts kill the connection and
+ // new connections are not created to take their place. 
+ EXPECT_EQ(connections_to_initiate, client->responseTimeouts()); + // No client sockets are rudely closed by server. They timeout instead. + EXPECT_EQ(client->remoteCloses(), 0); + + // assert that the origin request callback is called for every response + // received by the client. + EXPECT_GE(origin_callbacks.requestsReceived(), client->responsesReceived()); + + // assert that the policy request callback is called for every response + // received by the client. + EXPECT_GE(policy_cluster.requestsReceived(), client->responsesReceived()); + +#ifdef __APPLE__ + // Envoy doesn't detect client disconnects on MacOS so any outstanding + // requests to the policy server won't be cancelled. See + // https://github.com/envoyproxy/envoy/issues/4294 + return; +#endif + + // Assertions against the mixer filter's internal counters. Many of these + // assertions rely on an implementational artifact of the load generator + // client - when a request is cancelled due to timeout the connection is + // closed. With enough retries every connection we create will be closed due + // to cancellation/timeout. 
+ EXPECT_EQ(counters["http_mixer_filter.total_remote_call_other_errors"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_retries"], + connections_to_initiate / 2, 2 * connections_to_initiate); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hits"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_cancellations"], + connections_to_initiate * 0.8, connections_to_initiate); + EXPECT_GE(counters["http_mixer_filter.total_remote_calls"], + connections_to_initiate); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_check_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_misses"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hits"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_successes"], + connections_to_initiate / 2, 2 * connections_to_initiate); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_timeouts"], 0, + connections_to_initiate); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_call_send_errors"], + counters["http_mixer_filter.total_remote_calls"] / 4, + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_misses"], + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_quota_calls"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_report_calls"], 0, + counters["http_mixer_filter.total_report_calls"] * 0.12); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_prefetch_calls"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_check_calls"], + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_report_calls"], + counters["http_mixer_filter.total_remote_calls"] * 0.75, + 
counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_check_calls"], + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_calls"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_check_accepts"], + counters["http_mixer_filter.total_remote_calls"] / 4, + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_denies"], 0); +} + +TEST_F(MixerFaultTest, CancelRetry) { + Envoy::Logger::Registry::setLogLevel(spdlog::level::err); + + // Force client timeout while requests are waiting between retries. + uint32_t retries = 1; + uint32_t base_retry_ms = 10'000; + uint32_t max_retry_ms = 10'000; + constexpr NetworkFailPolicy fail_policy = NetworkFailPolicy::FAIL_CLOSED; + constexpr uint32_t connections_to_initiate = 30; + constexpr uint32_t requests_to_send = 30 * connections_to_initiate; + + // + // Setup + // + + // Origin server immediately sends a simple 200 OK to every request + ServerCallbackHelper origin_callbacks; + + ClusterHelper policy_cluster( + {// One policy server immediately closes any connection accepted. + new ServerCallbackHelper( + [](ServerConnection &, ServerStream &, + Envoy::Http::HeaderMapPtr &&) { + GTEST_FATAL_FAILURE_( + "Connections immediately closed so no response should be " + "received"); + }, + [](ServerConnection &) -> ServerCallbackResult { + return ServerCallbackResult::CLOSE; + })}); + + ClusterHelper telemetry_cluster( + {// Telemetry server sends a gRPC success response immediately to every + // telemetry report. 
+ new ServerCallbackHelper([](ServerConnection &, ServerStream &stream,
+ Envoy::Http::HeaderMapPtr &&) {
+ ::istio::mixer::v1::ReportResponse response;
+ stream.sendGrpcResponse(Envoy::Grpc::Status::Ok, response,
+ std::chrono::milliseconds(0));
+ })});
+
+ LoadGeneratorPtr client =
+ startServers(fail_policy, origin_callbacks, policy_cluster,
+ telemetry_cluster, retries, base_retry_ms, max_retry_ms);
+ //
+ // Exec test and wait for it to finish
+ //
+
+ Envoy::Http::HeaderMapPtr request{
+ new Envoy::Http::TestHeaderMapImpl{{":method", "GET"},
+ {":path", "/"},
+ {":scheme", "http"},
+ {":authority", "host"}}};
+ client->run(connections_to_initiate, requests_to_send, std::move(request),
+ std::chrono::milliseconds(500));
+
+ std::unordered_map<std::string, double> counters;
+ extractCounters("http_mixer_filter", counters);
+
+ // shutdown envoy by destroying it
+ test_server_ = nullptr;
+ // wait until the upstreams have closed all connections they accepted.
+ // shutting down envoy should close them all
+ origin_callbacks.wait();
+ policy_cluster.wait();
+ telemetry_cluster.wait();
+
+ //
+ // Evaluate test
+ //
+
+ // All client connections are successfully established.
+ EXPECT_EQ(client->connectSuccesses(), connections_to_initiate);
+ EXPECT_EQ(client->connectFailures(), 0);
+ // Client close callback called for every client connection.
+ EXPECT_EQ(client->localCloses(), connections_to_initiate);
+ // Client doesn't receive any responses
+ EXPECT_EQ(client->responsesReceived(), 0);
+ EXPECT_EQ(client->class2xxResponses(), 0);
+ EXPECT_EQ(client->class4xxResponses(), 0);
+ EXPECT_EQ(client->class5xxResponses(), 0);
+ // All requests timeout. Implementational artifact: timeouts kill the
+ // connection and new connections are not created to take their place.
+ EXPECT_EQ(connections_to_initiate, client->responseTimeouts());
+ // No client sockets are rudely closed by server / no client sockets are
+ // reset. 
+ EXPECT_EQ(client->remoteCloses(), 0); + + // The origin server receives no requests + EXPECT_EQ(origin_callbacks.requestsReceived(), 0); + + // The policy server receives no requests + EXPECT_EQ(policy_cluster.requestsReceived(), 0); + + // Assertions against the mixer filter's internal counters. Many of these + // assertions rely on an implementational artifact of the load generator + // client - when a request is cancelled due to timeout the connection is + // closed. With enough retries every connection we create will be closed due + // to cancellation/timeout. + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_other_errors"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_retries"], + connections_to_initiate); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hits"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_cancellations"], 0); + EXPECT_GE(counters["http_mixer_filter.total_remote_calls"], + connections_to_initiate); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_accepts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_check_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_misses"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hits"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_successes"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_timeouts"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_remote_call_send_errors"], + connections_to_initiate); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_denies"], 0); + EXPECT_EQ(counters["http_mixer_filter.total_check_cache_misses"], + counters["http_mixer_filter.total_remote_calls"]); + EXPECT_EQ(counters["http_mixer_filter.total_quota_calls"], 0); + EXPECT_IN_RANGE(counters["http_mixer_filter.total_remote_report_calls"], 0, + counters["http_mixer_filter.total_report_calls"] * 0.12); + EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_prefetch_calls"], 
0);
+ EXPECT_EQ(counters["http_mixer_filter.total_remote_check_calls"],
+ counters["http_mixer_filter.total_remote_calls"]);
+ // TODO(jblatt) report calls are not made if the client disconnects first. Bug link needed.
+ EXPECT_IN_RANGE(counters["http_mixer_filter.total_report_calls"], 0,
+ counters["http_mixer_filter.total_remote_calls"]);
+ EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_denies"], 0);
+ EXPECT_EQ(counters["http_mixer_filter.total_check_calls"],
+ counters["http_mixer_filter.total_remote_calls"]);
+ EXPECT_EQ(counters["http_mixer_filter.total_check_cache_hit_accepts"], 0);
+ EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_accepts"], 0);
+ EXPECT_EQ(counters["http_mixer_filter.total_remote_quota_calls"], 0);
+ EXPECT_EQ(counters["http_mixer_filter.total_remote_check_accepts"], 0);
+ EXPECT_EQ(counters["http_mixer_filter.total_quota_cache_hit_denies"], 0);
+}
+
+} // namespace Integration
+} // namespace Mixer